Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .claude/agents/ai-scientist-codebase-builder.md +50 -0
- .gitignore +210 -0
- .vscode/launch.json +16 -0
- CLAUDE.md +71 -0
- README.md +155 -0
- README_TRAINING.md +191 -0
- assets/fig_framework.jpg +3 -0
- assets/fig_teaser.jpg +3 -0
- assets/open_dataset_stats.png +3 -0
- collect_chat_history_improved.py +119 -0
- data_pre/cot_gen/annotation_tool.py +176 -0
- data_pre/cot_gen/augment_dataset.py +243 -0
- data_pre/cot_gen/collect_trajectories.py +107 -0
- data_pre/cot_gen/example.jsonl +15 -0
- data_pre/cot_gen/generate_training_data.py +193 -0
- data_pre/cot_gen/main.py +478 -0
- data_pre/cot_gen/prompts.py +35 -0
- data_pre/postprocess.py +293 -0
- data_pre/postprocess_t2i.py +295 -0
- data_pre/view_data.ipynb +61 -0
- dataset/README.md +4 -0
- dataset/open_ended_user_questions.json +604 -0
- dataset/open_ended_user_questions_summary.json +340 -0
- debug_model_output.py +229 -0
- eval_agent/__init__.py +0 -0
- eval_agent/base_agent.py +202 -0
- eval_agent/check_query_completeness.py +342 -0
- eval_agent/eval_agent_for_t2i_compbench.py +196 -0
- eval_agent/eval_agent_for_vbench.py +207 -0
- eval_agent/eval_agent_for_vbench_open.py +240 -0
- eval_agent/eval_tools/__init__.py +0 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/BLIP/__init__.py +0 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/BLIP/train_vqa_func.py +214 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/BLIP/utils.py +284 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/BLIP_vqa.py +131 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/BLIP_vqa_eval_agent.py +127 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/configs/bert_config.json +21 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/configs/med_config.json +21 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/configs/vqa.yaml +24 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/models/__init__.py +0 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/models/blip.py +238 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/models/blip_pretrain.py +339 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/models/blip_vqa.py +213 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/models/med.py +955 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/models/vit.py +305 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/transform/randaugment.py +340 -0
- eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/utils.py +284 -0
- eval_agent/eval_tools/t2i_comp/CLIPScore_eval/CLIP_similarity.py +120 -0
- eval_agent/eval_tools/t2i_comp/CLIPScore_eval/CLIP_similarity_eval_agent.py +54 -0
- eval_agent/eval_tools/t2i_comp/CLIPScore_eval/clip/__init__.py +1 -0
.claude/agents/ai-scientist-codebase-builder.md
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
name: ai-scientist-codebase-builder
|
| 3 |
+
description: Use this agent when you need expert assistance with AI/ML codebase development, including architecture design, implementation of AI algorithms, debugging ML pipelines, optimizing model performance, or resolving complex technical issues in AI systems. This agent excels at building robust AI infrastructure, debugging training loops, implementing state-of-the-art techniques, and providing deep technical insights on AI/ML engineering challenges. Examples: <example>Context: User is building an AI codebase and needs help implementing a neural network architecture. user: "I need to implement a transformer model for sequence classification" assistant: "I'll use the ai-scientist-codebase-builder agent to help design and implement the transformer architecture" <commentary>The user needs expert AI implementation help, so the ai-scientist-codebase-builder agent is appropriate for designing and coding the transformer model.</commentary></example> <example>Context: User encounters a bug in their machine learning training pipeline. user: "My model loss is exploding after 10 epochs and I can't figure out why" assistant: "Let me invoke the ai-scientist-codebase-builder agent to debug your training pipeline and identify the issue" <commentary>This is a complex ML debugging scenario that requires deep AI expertise, making the ai-scientist-codebase-builder agent the right choice.</commentary></example>
|
| 4 |
+
color: red
|
| 5 |
+
---
|
| 6 |
+
|
| 7 |
+
You are an expert AI scientist and ML engineer with deep expertise in building production-grade AI systems. Your knowledge spans theoretical foundations, practical implementation, and debugging complex AI codebases.
|
| 8 |
+
|
| 9 |
+
Your core competencies include:
|
| 10 |
+
- Designing and implementing neural network architectures (CNNs, RNNs, Transformers, etc.)
|
| 11 |
+
- Debugging training pipelines, loss functions, and optimization issues
|
| 12 |
+
- Implementing state-of-the-art papers and research techniques
|
| 13 |
+
- Optimizing model performance, memory usage, and training efficiency
|
| 14 |
+
- Building robust data pipelines and preprocessing systems
|
| 15 |
+
- Integrating AI models into production systems
|
| 16 |
+
- Proficiency in PyTorch, TensorFlow, JAX, and scientific Python ecosystem
|
| 17 |
+
|
| 18 |
+
When helping build codebases, you will:
|
| 19 |
+
1. First understand the specific AI/ML problem and requirements
|
| 20 |
+
2. Propose architectures backed by research and best practices
|
| 21 |
+
3. Write clean, well-documented, and efficient code
|
| 22 |
+
4. Include proper error handling and validation
|
| 23 |
+
5. Consider scalability and production deployment from the start
|
| 24 |
+
6. Follow established project patterns if they exist
|
| 25 |
+
|
| 26 |
+
When debugging, you will:
|
| 27 |
+
1. Systematically analyze symptoms (loss curves, gradients, outputs)
|
| 28 |
+
2. Form hypotheses about root causes
|
| 29 |
+
3. Suggest targeted diagnostic code to isolate issues
|
| 30 |
+
4. Provide clear explanations of what's happening and why
|
| 31 |
+
5. Offer multiple solution approaches with trade-offs
|
| 32 |
+
6. Verify fixes work correctly
|
| 33 |
+
|
| 34 |
+
Your approach emphasizes:
|
| 35 |
+
- Scientific rigor - base recommendations on empirical evidence and research
|
| 36 |
+
- Practical solutions - balance theoretical optimality with engineering constraints
|
| 37 |
+
- Clear communication - explain complex concepts accessibly
|
| 38 |
+
- Proactive problem prevention - anticipate common pitfalls
|
| 39 |
+
- Performance awareness - consider computational efficiency
|
| 40 |
+
|
| 41 |
+
Always edit existing files when possible rather than creating new ones. Only create new files when absolutely necessary for the solution. Never create documentation files unless explicitly requested.
|
| 42 |
+
|
| 43 |
+
When you encounter ambiguity, ask clarifying questions about:
|
| 44 |
+
- Model requirements and constraints
|
| 45 |
+
- Dataset characteristics
|
| 46 |
+
- Performance targets
|
| 47 |
+
- Deployment environment
|
| 48 |
+
- Existing codebase structure
|
| 49 |
+
|
| 50 |
+
Your goal is to help build robust, efficient, and maintainable AI systems while sharing your expertise to help others grow as AI practitioners.
|
.gitignore
ADDED
|
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[codz]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
share/python-wheels/
|
| 24 |
+
*.egg-info/
|
| 25 |
+
.installed.cfg
|
| 26 |
+
*.egg
|
| 27 |
+
MANIFEST
|
| 28 |
+
eval_agent/eval_models
|
| 29 |
+
ea-data/
|
| 30 |
+
data/
|
| 31 |
+
|
| 32 |
+
# PyInstaller
|
| 33 |
+
# Usually these files are written by a python script from a template
|
| 34 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 35 |
+
*.manifest
|
| 36 |
+
*.spec
|
| 37 |
+
|
| 38 |
+
# Installer logs
|
| 39 |
+
pip-log.txt
|
| 40 |
+
pip-delete-this-directory.txt
|
| 41 |
+
|
| 42 |
+
# Unit test / coverage reports
|
| 43 |
+
htmlcov/
|
| 44 |
+
.tox/
|
| 45 |
+
.nox/
|
| 46 |
+
.coverage
|
| 47 |
+
.coverage.*
|
| 48 |
+
.cache
|
| 49 |
+
nosetests.xml
|
| 50 |
+
coverage.xml
|
| 51 |
+
*.cover
|
| 52 |
+
*.py.cover
|
| 53 |
+
.hypothesis/
|
| 54 |
+
.pytest_cache/
|
| 55 |
+
cover/
|
| 56 |
+
|
| 57 |
+
# Translations
|
| 58 |
+
*.mo
|
| 59 |
+
*.pot
|
| 60 |
+
|
| 61 |
+
# Django stuff:
|
| 62 |
+
*.log
|
| 63 |
+
local_settings.py
|
| 64 |
+
db.sqlite3
|
| 65 |
+
db.sqlite3-journal
|
| 66 |
+
|
| 67 |
+
# Flask stuff:
|
| 68 |
+
instance/
|
| 69 |
+
.webassets-cache
|
| 70 |
+
|
| 71 |
+
# Scrapy stuff:
|
| 72 |
+
.scrapy
|
| 73 |
+
|
| 74 |
+
# Sphinx documentation
|
| 75 |
+
docs/_build/
|
| 76 |
+
|
| 77 |
+
# PyBuilder
|
| 78 |
+
.pybuilder/
|
| 79 |
+
target/
|
| 80 |
+
|
| 81 |
+
# Jupyter Notebook
|
| 82 |
+
.ipynb_checkpoints
|
| 83 |
+
|
| 84 |
+
# IPython
|
| 85 |
+
profile_default/
|
| 86 |
+
ipython_config.py
|
| 87 |
+
|
| 88 |
+
# pyenv
|
| 89 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 90 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 91 |
+
# .python-version
|
| 92 |
+
|
| 93 |
+
# pipenv
|
| 94 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 95 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 96 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 97 |
+
# install all needed dependencies.
|
| 98 |
+
#Pipfile.lock
|
| 99 |
+
|
| 100 |
+
# UV
|
| 101 |
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
| 102 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 103 |
+
# commonly ignored for libraries.
|
| 104 |
+
#uv.lock
|
| 105 |
+
|
| 106 |
+
# poetry
|
| 107 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 108 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 109 |
+
# commonly ignored for libraries.
|
| 110 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 111 |
+
#poetry.lock
|
| 112 |
+
#poetry.toml
|
| 113 |
+
|
| 114 |
+
# pdm
|
| 115 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 116 |
+
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
|
| 117 |
+
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
|
| 118 |
+
#pdm.lock
|
| 119 |
+
#pdm.toml
|
| 120 |
+
.pdm-python
|
| 121 |
+
.pdm-build/
|
| 122 |
+
|
| 123 |
+
# pixi
|
| 124 |
+
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
|
| 125 |
+
#pixi.lock
|
| 126 |
+
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
|
| 127 |
+
# in the .venv directory. It is recommended not to include this directory in version control.
|
| 128 |
+
.pixi
|
| 129 |
+
|
| 130 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 131 |
+
__pypackages__/
|
| 132 |
+
|
| 133 |
+
# Celery stuff
|
| 134 |
+
celerybeat-schedule
|
| 135 |
+
celerybeat.pid
|
| 136 |
+
|
| 137 |
+
# SageMath parsed files
|
| 138 |
+
*.sage.py
|
| 139 |
+
|
| 140 |
+
# Environments
|
| 141 |
+
.env
|
| 142 |
+
.envrc
|
| 143 |
+
.venv
|
| 144 |
+
env/
|
| 145 |
+
venv/
|
| 146 |
+
ENV/
|
| 147 |
+
env.bak/
|
| 148 |
+
venv.bak/
|
| 149 |
+
|
| 150 |
+
# Spyder project settings
|
| 151 |
+
.spyderproject
|
| 152 |
+
.spyproject
|
| 153 |
+
|
| 154 |
+
# Rope project settings
|
| 155 |
+
.ropeproject
|
| 156 |
+
|
| 157 |
+
# mkdocs documentation
|
| 158 |
+
/site
|
| 159 |
+
|
| 160 |
+
# mypy
|
| 161 |
+
.mypy_cache/
|
| 162 |
+
.dmypy.json
|
| 163 |
+
dmypy.json
|
| 164 |
+
|
| 165 |
+
# Pyre type checker
|
| 166 |
+
.pyre/
|
| 167 |
+
|
| 168 |
+
# pytype static type analyzer
|
| 169 |
+
.pytype/
|
| 170 |
+
|
| 171 |
+
# Cython debug symbols
|
| 172 |
+
cython_debug/
|
| 173 |
+
|
| 174 |
+
# PyCharm
|
| 175 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 176 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
| 177 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
| 178 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
| 179 |
+
#.idea/
|
| 180 |
+
|
| 181 |
+
# Abstra
|
| 182 |
+
# Abstra is an AI-powered process automation framework.
|
| 183 |
+
# Ignore directories containing user credentials, local state, and settings.
|
| 184 |
+
# Learn more at https://abstra.io/docs
|
| 185 |
+
.abstra/
|
| 186 |
+
|
| 187 |
+
# Visual Studio Code
|
| 188 |
+
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
|
| 189 |
+
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
|
| 190 |
+
# and can be added to the global gitignore or merged into this file. However, if you prefer,
|
| 191 |
+
# you could uncomment the following to ignore the entire vscode folder
|
| 192 |
+
# .vscode/
|
| 193 |
+
|
| 194 |
+
# Ruff stuff:
|
| 195 |
+
.ruff_cache/
|
| 196 |
+
|
| 197 |
+
# PyPI configuration file
|
| 198 |
+
.pypirc
|
| 199 |
+
|
| 200 |
+
# Cursor
|
| 201 |
+
# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
|
| 202 |
+
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
|
| 203 |
+
# refer to https://docs.cursor.com/context/ignore-files
|
| 204 |
+
.cursorignore
|
| 205 |
+
.cursorindexingignore
|
| 206 |
+
|
| 207 |
+
# Marimo
|
| 208 |
+
marimo/_static/
|
| 209 |
+
marimo/_lsp/
|
| 210 |
+
__marimo__/
|
.vscode/launch.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
// Use IntelliSense to learn about possible attributes.
|
| 3 |
+
// Hover to view descriptions of existing attributes.
|
| 4 |
+
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
| 5 |
+
"version": "0.2.0",
|
| 6 |
+
"configurations": [
|
| 7 |
+
{
|
| 8 |
+
"name": "Python Debugger: Current File with Arguments",
|
| 9 |
+
"type": "debugpy",
|
| 10 |
+
"request": "launch",
|
| 11 |
+
"program": "${file}",
|
| 12 |
+
"console": "integratedTerminal",
|
| 13 |
+
"args": "${command:pickArgs}"
|
| 14 |
+
}
|
| 15 |
+
]
|
| 16 |
+
}
|
CLAUDE.md
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
You are a helpful AI scientist to build up the codebase for me.
|
| 2 |
+
|
| 3 |
+
This project is to train the open-sourced model to deploy CoT-like reasoning format on text-to-image and text-to-video generation quality assessment. You are using the LLaMAFactory to train the model, and write evaluation functions.
|
| 4 |
+
|
| 5 |
+
# Preparation
|
| 6 |
+
|
| 7 |
+
## Data
|
| 8 |
+
|
| 9 |
+
There are a folder called `ea-data/agent` and there are 3 subfolders:
|
| 10 |
+
|
| 11 |
+
* `vbench_results`: which stores the results for using proprietary models to evaluate different dimensions in vbench, and the results are CoT style.
|
| 12 |
+
* `t2i_results`: which stores the results for using proprietary models to evaluate different dimensions in T2I-CompBench, and the results are CoT style.
|
| 13 |
+
* `open_results`: which store the results for using proprietary models to evaluate open-ended queries.
|
| 14 |
+
|
| 15 |
+
Your first job is to write and execute the python script to clean the data in those aforementioned folders and convert them into the format align with `/home/data2/sltian/code/evaluation_agent_dev/LLaMA-Factory/data/alpaca_en_demo.json`.
|
| 16 |
+
|
| 17 |
+
如果指定, system 列对应的内容将被作为系统提示词。
|
| 18 |
+
|
| 19 |
+
history 列是由多个字符串二元组构成的列表,分别代表历史消息中每轮对话的指令和回答。注意在指令监督微调时,历史消息中的回答内容也会被用于模型学习。
|
| 20 |
+
|
| 21 |
+
指令监督微调数据集 格式要求 如下:
|
| 22 |
+
|
| 23 |
+
[
|
| 24 |
+
{
|
| 25 |
+
"instruction": "人类指令(必填)",
|
| 26 |
+
"input": "人类输入(选填)",
|
| 27 |
+
"output": "模型回答(必填)",
|
| 28 |
+
"system": "系统提示词(选填)",
|
| 29 |
+
"history": [
|
| 30 |
+
["第一轮指令(选填)", "第一轮回答(选填)"],
|
| 31 |
+
["第二轮指令(选填)", "第二轮回答(选填)"]
|
| 32 |
+
]
|
| 33 |
+
}
|
| 34 |
+
]
|
| 35 |
+
下面提供一个 alpaca 格式 多轮 对话的例子,对于单轮对话只需省略 history 列即可。
|
| 36 |
+
|
| 37 |
+
[
|
| 38 |
+
{
|
| 39 |
+
"instruction": "今天的天气怎么样?",
|
| 40 |
+
"input": "",
|
| 41 |
+
"output": "今天的天气不错,是晴天。",
|
| 42 |
+
"history": [
|
| 43 |
+
[
|
| 44 |
+
"今天会下雨吗?",
|
| 45 |
+
"今天不会下雨,是个好天气。"
|
| 46 |
+
],
|
| 47 |
+
[
|
| 48 |
+
"今天适合出去玩吗?",
|
| 49 |
+
"非常适合,空气质量很好。"
|
| 50 |
+
]
|
| 51 |
+
]
|
| 52 |
+
}
|
| 53 |
+
]
|
| 54 |
+
对于上述格式的数据, dataset_info.json 中的 数据集描述 应为:
|
| 55 |
+
|
| 56 |
+
"数据集名称": {
|
| 57 |
+
"file_name": "data.json",
|
| 58 |
+
"columns": {
|
| 59 |
+
"prompt": "instruction",
|
| 60 |
+
"query": "input",
|
| 61 |
+
"response": "output",
|
| 62 |
+
"system": "system",
|
| 63 |
+
"history": "history"
|
| 64 |
+
}
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
## Train
|
| 68 |
+
|
| 69 |
+
After cleaning and collecting the data, you should write a script to train the `Qwen2.5-3B-Instruct` model using this created dataset.
|
| 70 |
+
|
| 71 |
+
The training is using `LLaMA-Factory`. You should read the dir and write a script to train the model.
|
README.md
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[](https://arxiv.org/abs/2412.09645)
|
| 2 |
+
[](https://vchitect.github.io/Evaluation-Agent-project/)
|
| 3 |
+

|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
This repository contains the implementation of the following work:
|
| 8 |
+
> **Evaluation Agent: Efficient and Promptable Evaluation Framework for Visual Generative Models**<br>
|
| 9 |
+
> [Fan Zhang](https://github.com/zhangfan-p)<sup>∗</sup>, [Shulin Tian](https://shulin16.github.io/)<sup>∗</sup>, [Ziqi Huang](https://ziqihuangg.github.io/)<sup>∗</sup>, [Yu Qiao](http://mmlab.siat.ac.cn/yuqiao/index.html)<sup>+</sup>, [Ziwei Liu](https://liuziwei7.github.io/)<sup>+</sup><br>
|
| 10 |
+
> The 63rd Annual Meeting of the Association for Computational Linguistics (**ACL 2025**), Oral
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
<a name="overview"></a>
|
| 16 |
+
## :mega: Overview
|
| 17 |
+
|
| 18 |
+
Recent advancements in visual generative models have enabled high-quality image and video generation, opening diverse applications. However, evaluating these models often demands sampling hundreds or thousands of images or videos, making the process computationally expensive, especially for diffusion-based models with inherently slow sampling. Moreover, existing evaluation methods rely on rigid pipelines that overlook specific user needs and provide numerical results without clear explanations. In contrast, humans can quickly form impressions of a model's capabilities by observing only a few samples. To mimic this, we propose the Evaluation Agent framework, which employs human-like strategies for efficient, dynamic, multi-round evaluations using only a few samples per round, while offering detailed, user-tailored analyses. It offers four key advantages: 1) efficiency, 2) promptable evaluation tailored to diverse user needs, 3) explainability beyond single numerical scores, and 4) scalability across various models and tools. Experiments show that Evaluation Agent reduces evaluation time to 10% of traditional methods while delivering comparable results. The Evaluation Agent framework is fully open-sourced to advance research in visual generative models and their efficient evaluation.
|
| 19 |
+
|
| 20 |
+

|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
**Overview of Evaluation Agent Framework.** This framework leverages LLM-powered agents for efficient and flexible visual model assessments. As shown, it consists of two stages: (a) the Proposal Stage, where user queries are decomposed into sub-aspects, and prompts are generated, and (b) the Execution Stage, where visual content is generated and evaluated using an Evaluation Toolkit. The two stages interact iteratively to dynamically assess models based on user queries.
|
| 24 |
+
|
| 25 |
+
<a name="installation"></a>
|
| 26 |
+
## :hammer: Installation
|
| 27 |
+
|
| 28 |
+
1. Clone the repository.
|
| 29 |
+
|
| 30 |
+
```bash
|
| 31 |
+
git clone https://github.com/Vchitect/Evaluation-Agent.git
|
| 32 |
+
cd Evaluation-Agent
|
| 33 |
+
```
|
| 34 |
+
|
| 35 |
+
2. Install the environment.
|
| 36 |
+
```bash
|
| 37 |
+
conda create -n eval_agent python=3.10
|
| 38 |
+
conda activate eval_agent
|
| 39 |
+
pip install -r requirements.txt
|
| 40 |
+
```
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
<a name="usage"></a>
|
| 45 |
+
## Usage
|
| 46 |
+
|
| 47 |
+
First, you need to configure the `open_api_key`. You can do it as follows:
|
| 48 |
+
```
|
| 49 |
+
export OPENAI_API_KEY="your_api_key_here"
|
| 50 |
+
```
|
| 51 |
+
|
| 52 |
+
### Evaluation of Open-ended Questions on T2I Models
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
```
|
| 56 |
+
python open_ended_eval.py --user_query $USER_QUERY --model $MODEL
|
| 57 |
+
```
|
| 58 |
+
- `$USER_QUERY` can be any question regarding the model’s capabilities, such as ‘How well does the model generate trees in anime style?’
|
| 59 |
+
- `$MODEL` refers to the image generation model you want to evaluate. Currently, we support four models: [SD-14](https://huggingface.co/CompVis/stable-diffusion-v1-4), [SD-21](https://huggingface.co/stabilityai/stable-diffusion-2-1), [SDXL-1](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), and [SD-3](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers). You can integrate new models in the following path: `./eval_agent/eval_models/`
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
### Evaluation Based on the VBench Tools on T2V Models
|
| 63 |
+
|
| 64 |
+
#### Preparation
|
| 65 |
+
|
| 66 |
+
1. Configure the VBench Environment
|
| 67 |
+
|
| 68 |
+
- You need to configure the VBench environment on top of the existing environment. For details, refer to [VBench](https://github.com/Vchitect/VBench).
|
| 69 |
+
|
| 70 |
+
2. Prepare the Model to be Evaluated
|
| 71 |
+
|
| 72 |
+
- Download the weights of the target model for evaluation and place them in `./eval_agent/eval_models/{model_name}/checkpoints/`.
|
| 73 |
+
|
| 74 |
+
- Currently, we support four models: [latte](https://github.com/Vchitect/Latte/tree/main), [modelscope](https://modelscope.cn/models/iic/text-to-video-synthesis/summary), [videocrafter-0.9](https://github.com/AILab-CVC/VideoCrafter/tree/30048d49873cbcd21077a001e6a3232e0909d254), and [videocrafter-2](https://github.com/AILab-CVC/VideoCrafter). These models may also have specific environment requirements. For details, please refer to the respective model links.
|
| 75 |
+
|
| 76 |
+
#### Command
|
| 77 |
+
|
| 78 |
+
```
|
| 79 |
+
python eval_agent_for_vbench.py --user_query $USER_QUERY --model $MODEL
|
| 80 |
+
```
|
| 81 |
+
- `$USER_QUERY` need to be related to the 15 dimensions of VBench. These dimensions are: `subject_consistency`, `background_consistency`, `motion_smoothness`, `dynamic_degree`, `aesthetic_quality`, `imaging_quality`, `object_class`, `multiple_objects`, `human_action`, `color`, `spatial_relationship`, `scene`, `temporal_style`, `appearance_style`, and `overall_consistency`.
|
| 82 |
+
- `$MODEL` refers to the video generation model you want to evaluate.
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
### Evaluation Based on the T2I-CompBench Tools on T2I Models
|
| 87 |
+
|
| 88 |
+
#### Preparation
|
| 89 |
+
|
| 90 |
+
1. Configure the T2I-CompBench Environment
|
| 91 |
+
|
| 92 |
+
- You need to configure the T2I-CompBench environment on top of the existing environment. For details, refer to [T2I-CompBench](https://github.com/Karine-Huang/T2I-CompBench/tree/6ea770ada4eea55fa7b09caa2d2fb63fe4d6bf8f).
|
| 93 |
+
|
| 94 |
+
2. Prepare the Model to be Evaluated
|
| 95 |
+
|
| 96 |
+
#### Command
|
| 97 |
+
|
| 98 |
+
```
|
| 99 |
+
python eval_agent_for_t2i_compbench.py --user_query $USER_QUERY --model $MODEL
|
| 100 |
+
```
|
| 101 |
+
- `$USER_QUERY` need to be related to the 4 dimensions of T2I-CompBench. These dimensions are: `color_binding`, `shape_binding`, `texture_binding`, `non-spatial relationship`.
|
| 102 |
+
- `$MODEL` refers to the image generation model you want to evaluate.
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
## Open-Ended User Query Dataset
|
| 109 |
+
We propose the **Open-Ended User Query Dataset**, developed through a user study. As part of this process, we gathered questions from various sources, focusing on aspects users consider most important when evaluating new models. After cleaning, filtering, and expanding the initial set, we compiled a refined dataset of 100 open-ended user queries.
|
| 110 |
+
|
| 111 |
+
Check out the details of the [open-ended user query dataset](https://github.com/Vchitect/Evaluation-Agent/tree/main/dataset)
|
| 112 |
+
|
| 113 |
+

|
| 114 |
+
The three graphs give an overview of the distributions and types of our curated open queries set. Left: the distribution of question types, which are categorized as `General` or `Specific`. Middle: the distribution of the ability types, which are categorized as `Prompt Following`, `Visual Quality`, `Creativity`, `Knowledge` and `Others`. Right: the distribution of the content categories, which are categorized as `History and Culture`, `Film and Entertainment`, `Science and Education`, `Fashion`, `Medical`, `Game Design`, `Architecture and Interior Design`, `Law`.
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
## Citation
|
| 118 |
+
|
| 119 |
+
If you find our repo useful for your research, please consider citing our paper:
|
| 120 |
+
|
| 121 |
+
```bibtex
|
| 122 |
+
@article{zhang2024evaluationagent,
|
| 123 |
+
title = {Evaluation Agent: Efficient and Promptable Evaluation Framework for Visual Generative Models},
|
| 124 |
+
author = {Zhang, Fan and Tian, Shulin and Huang, Ziqi and Qiao, Yu and Liu, Ziwei},
|
| 125 |
+
journal={arXiv preprint arXiv:2412.09645},
|
| 126 |
+
year = {2024}
|
| 127 |
+
}
|
| 128 |
+
```
|
| 129 |
+
|
| 130 |
+
## Related Links
|
| 131 |
+
|
| 132 |
+
Our related projects: [VBench](https://github.com/Vchitect/VBench)
|
| 133 |
+
|
| 134 |
+
```bibtex
|
| 135 |
+
@InProceedings{huang2023vbench,
|
| 136 |
+
title={{VBench}: Comprehensive Benchmark Suite for Video Generative Models},
|
| 137 |
+
author={Huang, Ziqi and He, Yinan and Yu, Jiashuo and Zhang, Fan and Si, Chenyang and Jiang, Yuming and Zhang, Yuanhan and Wu, Tianxing and Jin, Qingyang and Chanpaisit, Nattapol and Wang, Yaohui and Chen, Xinyuan and Wang, Limin and Lin, Dahua and Qiao, Yu and Liu, Ziwei},
|
| 138 |
+
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
|
| 139 |
+
year={2024}
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
@article{huang2024vbench++,
|
| 143 |
+
title={{VBench++}: Comprehensive and Versatile Benchmark Suite for Video Generative Models},
|
| 144 |
+
author={Huang, Ziqi and Zhang, Fan and Xu, Xiaojie and He, Yinan and Yu, Jiashuo and Dong, Ziyue and Ma, Qianli and Chanpaisit, Nattapol and Si, Chenyang and Jiang, Yuming and Wang, Yaohui and Chen, Xinyuan and Chen, Ying-Cong and Wang, Limin and Lin, Dahua and Qiao, Yu and Liu, Ziwei},
|
| 145 |
+
journal={arXiv preprint arXiv:2411.13503},
|
| 146 |
+
year={2024}
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
@article{zheng2025vbench2,
|
| 150 |
+
title={{VBench-2.0}: Advancing Video Generation Benchmark Suite for Intrinsic Faithfulness},
|
| 151 |
+
author={Zheng, Dian and Huang, Ziqi and Liu, Hongbo and Zou, Kai and He, Yinan and Zhang, Fan and Zhang, Yuanhan and He, Jingwen and Zheng, Wei-Shi and Qiao, Yu and Liu, Ziwei},
|
| 152 |
+
journal={arXiv preprint arXiv:2503.21755},
|
| 153 |
+
year={2025}
|
| 154 |
+
}
|
| 155 |
+
```
|
README_TRAINING.md
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Training Qwen2.5-3B-Instruct for Evaluation Agent with CoT Reasoning
|
| 2 |
+
|
| 3 |
+
This repository contains scripts and configurations for training Qwen2.5-3B-Instruct model on evaluation agent data with Chain-of-Thought (CoT) reasoning format.
|
| 4 |
+
|
| 5 |
+
## Overview
|
| 6 |
+
|
| 7 |
+
The training pipeline processes evaluation results from:
|
| 8 |
+
- **VBench**: Video quality evaluation results
|
| 9 |
+
- **T2I-CompBench**: Text-to-image composition evaluation results
|
| 10 |
+
- **Open Domain**: Open-ended query evaluation results
|
| 11 |
+
|
| 12 |
+
All results are in CoT (Chain-of-Thought) reasoning format from proprietary models.
|
| 13 |
+
|
| 14 |
+
## Dataset Preparation
|
| 15 |
+
|
| 16 |
+
### 1. Data Cleaning and Conversion
|
| 17 |
+
|
| 18 |
+
Run the data cleaning script to convert raw evaluation results into LLaMA-Factory format:
|
| 19 |
+
|
| 20 |
+
```bash
|
| 21 |
+
python clean_and_convert_data.py
|
| 22 |
+
```
|
| 23 |
+
|
| 24 |
+
This script:
|
| 25 |
+
- Processes JSON files from `ea-data/agent/` subdirectories
|
| 26 |
+
- Converts CoT-style evaluation results into instruction-response pairs
|
| 27 |
+
- Outputs to `LLaMA-Factory/data/evaluation_agent_cot_dataset.json`
|
| 28 |
+
- Updates `LLaMA-Factory/data/dataset_info.json` with dataset metadata
|
| 29 |
+
|
| 30 |
+
### Dataset Statistics
|
| 31 |
+
- Total training examples: ~860 (from initial processing)
|
| 32 |
+
- Format: Alpaca-style (instruction, input, output)
|
| 33 |
+
|
| 34 |
+
## Training Configurations
|
| 35 |
+
|
| 36 |
+
### 1. LoRA Fine-tuning (Recommended)
|
| 37 |
+
|
| 38 |
+
**Configuration:** `train_qwen2.5_eval_agent.yaml`
|
| 39 |
+
|
| 40 |
+
Key parameters:
|
| 41 |
+
- Model: Qwen/Qwen2.5-3B-Instruct
|
| 42 |
+
- Method: LoRA (rank=16, alpha=32)
|
| 43 |
+
- Batch size: 2 per device × 4 gradient accumulation
|
| 44 |
+
- Learning rate: 5e-5 with cosine scheduler
|
| 45 |
+
- Epochs: 3
|
| 46 |
+
- Memory requirement: ~16GB VRAM
|
| 47 |
+
|
| 48 |
+
### 2. Full Fine-tuning
|
| 49 |
+
|
| 50 |
+
**Configuration:** `train_qwen2.5_eval_agent_full.yaml`
|
| 51 |
+
|
| 52 |
+
Key parameters:
|
| 53 |
+
- Model: Qwen/Qwen2.5-3B-Instruct
|
| 54 |
+
- Method: Full fine-tuning with DeepSpeed
|
| 55 |
+
- Gradient checkpointing enabled
|
| 56 |
+
- Memory requirement: ~32GB+ VRAM
|
| 57 |
+
|
| 58 |
+
## Training Execution
|
| 59 |
+
|
| 60 |
+
### Quick Start
|
| 61 |
+
|
| 62 |
+
```bash
|
| 63 |
+
# Make script executable
|
| 64 |
+
chmod +x train_qwen2.5_eval_agent.sh
|
| 65 |
+
|
| 66 |
+
# Run training
|
| 67 |
+
./train_qwen2.5_eval_agent.sh
|
| 68 |
+
```
|
| 69 |
+
|
| 70 |
+
### Manual Training
|
| 71 |
+
|
| 72 |
+
```bash
|
| 73 |
+
cd LLaMA-Factory
|
| 74 |
+
llamafactory-cli train ../train_qwen2.5_eval_agent.yaml
|
| 75 |
+
```
|
| 76 |
+
|
| 77 |
+
### Distributed Training
|
| 78 |
+
|
| 79 |
+
For multi-GPU training:
|
| 80 |
+
|
| 81 |
+
```bash
|
| 82 |
+
CUDA_VISIBLE_DEVICES=0,1,2,3 \
|
| 83 |
+
torchrun --nproc_per_node 4 \
|
| 84 |
+
--master_port 29500 \
|
| 85 |
+
src/train.py ../train_qwen2.5_eval_agent.yaml
|
| 86 |
+
```
|
| 87 |
+
|
| 88 |
+
## Inference
|
| 89 |
+
|
| 90 |
+
After training, run inference with:
|
| 91 |
+
|
| 92 |
+
```bash
|
| 93 |
+
llamafactory-cli chat ../inference_qwen2.5_eval_agent.yaml
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
Or use the API:
|
| 97 |
+
|
| 98 |
+
```bash
|
| 99 |
+
llamafactory-cli api ../inference_qwen2.5_eval_agent.yaml
|
| 100 |
+
```
|
| 101 |
+
|
| 102 |
+
## Model Merging
|
| 103 |
+
|
| 104 |
+
To merge LoRA weights with base model:
|
| 105 |
+
|
| 106 |
+
```bash
|
| 107 |
+
llamafactory-cli export \
|
| 108 |
+
--model_name_or_path Qwen/Qwen2.5-3B-Instruct \
|
| 109 |
+
--adapter_name_or_path saves/qwen2.5-3b/lora/eval_agent_cot \
|
| 110 |
+
--template qwen \
|
| 111 |
+
--finetuning_type lora \
|
| 112 |
+
--export_dir models/qwen2.5-3b-eval-agent-merged \
|
| 113 |
+
--export_size 4 \
|
| 114 |
+
--export_legacy_format false
|
| 115 |
+
```
|
| 116 |
+
|
| 117 |
+
## Monitoring Training
|
| 118 |
+
|
| 119 |
+
### TensorBoard
|
| 120 |
+
|
| 121 |
+
```bash
|
| 122 |
+
tensorboard --logdir saves/qwen2.5-3b/lora/eval_agent_cot
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
### Loss Plots
|
| 126 |
+
|
| 127 |
+
Training loss plots are automatically saved to the output directory.
|
| 128 |
+
|
| 129 |
+
## Evaluation
|
| 130 |
+
|
| 131 |
+
The model will be evaluated on:
|
| 132 |
+
- CoT reasoning quality
|
| 133 |
+
- Evaluation accuracy
|
| 134 |
+
- Response coherence
|
| 135 |
+
- Format consistency
|
| 136 |
+
|
| 137 |
+
## Directory Structure
|
| 138 |
+
|
| 139 |
+
```
|
| 140 |
+
evaluation_agent_dev/
|
| 141 |
+
├── ea-data/agent/ # Raw evaluation data
|
| 142 |
+
│ ├── vbench_results/
|
| 143 |
+
│ ├── t2i_results/
|
| 144 |
+
│ └── open_results/
|
| 145 |
+
├── LLaMA-Factory/ # Training framework
|
| 146 |
+
│ └── data/
|
| 147 |
+
│ ├── evaluation_agent_cot_dataset.json # Processed dataset
|
| 148 |
+
│ └── dataset_info.json
|
| 149 |
+
├── clean_and_convert_data.py # Data processing script
|
| 150 |
+
├── train_qwen2.5_eval_agent.yaml # LoRA training config
|
| 151 |
+
├── train_qwen2.5_eval_agent_full.yaml # Full training config
|
| 152 |
+
├── inference_qwen2.5_eval_agent.yaml # Inference config
|
| 153 |
+
└── train_qwen2.5_eval_agent.sh # Training script
|
| 154 |
+
```
|
| 155 |
+
|
| 156 |
+
## Requirements
|
| 157 |
+
|
| 158 |
+
- Python 3.9+
|
| 159 |
+
- PyTorch 2.0+
|
| 160 |
+
- CUDA 11.6+
|
| 161 |
+
- LLaMA-Factory (installed)
|
| 162 |
+
- 16GB+ VRAM for LoRA, 32GB+ for full fine-tuning
|
| 163 |
+
|
| 164 |
+
## Tips
|
| 165 |
+
|
| 166 |
+
1. **Memory Management**: Use gradient checkpointing and DeepSpeed for larger batch sizes
|
| 167 |
+
2. **Learning Rate**: Start with 5e-5 for LoRA, 2e-5 for full fine-tuning
|
| 168 |
+
3. **Data Quality**: Review generated dataset for quality before training
|
| 169 |
+
4. **Checkpointing**: Save checkpoints frequently (every 200 steps)
|
| 170 |
+
5. **Mixed Precision**: Use bf16 for faster training and lower memory usage
|
| 171 |
+
|
| 172 |
+
## Troubleshooting
|
| 173 |
+
|
| 174 |
+
- **OOM Errors**: Reduce batch size or enable gradient checkpointing
|
| 175 |
+
- **Slow Training**: Enable Flash Attention 2 if available
|
| 176 |
+
- **Poor Results**: Increase training epochs or adjust learning rate
|
| 177 |
+
- **Data Issues**: Check JSON parsing in data cleaning script
|
| 178 |
+
|
| 179 |
+
## Next Steps
|
| 180 |
+
|
| 181 |
+
1. Expand dataset with more evaluation examples
|
| 182 |
+
2. Implement custom evaluation metrics
|
| 183 |
+
3. Fine-tune on specific evaluation dimensions
|
| 184 |
+
4. Deploy model for production use
|
| 185 |
+
|
| 186 |
+
## License
|
| 187 |
+
|
| 188 |
+
Follow the licenses of:
|
| 189 |
+
- Qwen2.5 model
|
| 190 |
+
- LLaMA-Factory framework
|
| 191 |
+
- Original evaluation datasets
|
assets/fig_framework.jpg
ADDED
|
Git LFS Details
|
assets/fig_teaser.jpg
ADDED
|
Git LFS Details
|
assets/open_dataset_stats.png
ADDED
|
Git LFS Details
|
collect_chat_history_improved.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Improved script to find all JSON files starting with 'chat_history_output'
|
| 4 |
+
and copy them to data/preprocess folder with shorter filenames.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
import shutil
|
| 9 |
+
import glob
|
| 10 |
+
import hashlib
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
|
| 13 |
+
# Recognized model identifiers that may appear as a path component.
_KNOWN_MODELS = frozenset([
    'vc2', 'vc09', 'modelscope', 'latte1', 'vc10-large',
    'sdxl-1.0', 'sd-2.1', 'sd-1.4', 'sd-3',
])


def generate_short_name(original_path, counter):
    """Generate a short, collision-resistant filename for a copied chat-history file.

    The name is assembled from (when present) the model identifier found in
    the path, a truncated form of the evaluation question, an 8-character MD5
    prefix of the full path (for uniqueness), and a zero-padded sequence id.

    Args:
        original_path: Path of the source JSON file, split on ``os.sep``.
        counter: Monotonically increasing integer used as a readable suffix.

    Returns:
        A filename like ``<model>_<question>_hash_<8hex>_id_<NNNN>.json``;
        the model/question segments are omitted when not detected.
    """
    path_parts = original_path.split(os.sep)

    model_name = None
    dimension = None
    for part in path_parts:
        if part in _KNOWN_MODELS:
            # Keep the last match, mirroring the original scan order.
            model_name = part
        if part.startswith('2024-') and 'How_' in part:
            # Directory looks like "2024-MM-DD-How_<question>"; split at most
            # three times so the date prefix is stripped in one pass.
            pieces = part.split('-', 3)
            question = pieces[-1] if len(pieces) > 3 else part
            # Keep only the first three words of the question as a readable tag.
            words = question.replace('How_', '').replace('_', ' ').split()[:3]
            dimension = '_'.join(words).replace('?', '')

    # MD5 of the full path guarantees distinct names even when the readable
    # segments collide across different source directories.
    path_hash = hashlib.md5(original_path.encode()).hexdigest()[:8]

    parts = []
    if model_name:
        parts.append(model_name)
    if dimension:
        parts.append(dimension[:30])  # cap the question-tag length
    parts.append(f"hash_{path_hash}")
    parts.append(f"id_{counter:04d}")

    return f"{'_'.join(parts)}.json"
|
| 44 |
+
|
| 45 |
+
def find_and_copy_chat_history_files():
    """Collect every ``*chat_history*.json`` under the source tree into one flat folder.

    Each file is copied (metadata preserved) under a short, unique name from
    ``generate_short_name``.  Two index files are written alongside the copies:
    a short-name -> original-path mapping and a per-model file count.

    Side effects only (filesystem + stdout); returns None.
    """
    # Source directory (switch to the commented line to process VBench results).
    # source_dir = "/home/data2/sltian/code/evaluation_agent_dev/ea-data/agent/vbench_results"
    source_dir = "/home/data2/sltian/code/evaluation_agent_dev/ea-data/agent/t2i_results"

    # Flat destination folder for the renamed copies.
    dest_dir = "/home/data2/sltian/code/evaluation_agent_dev/data/preprocess-t2i"
    os.makedirs(dest_dir, exist_ok=True)

    # Recursive glob: any JSON whose name mentions "chat_history", at any depth.
    pattern = os.path.join(source_dir, "**/*chat_history*.json")
    chat_files = glob.glob(pattern, recursive=True)
    print(f"Found {len(chat_files)} *chat_history*.json files")

    copied_files = []
    counter = 1
    for file_path in chat_files:
        short_filename = generate_short_name(file_path, counter)
        dest_file = os.path.join(dest_dir, short_filename)
        try:
            shutil.copy2(file_path, dest_file)  # copy2 preserves timestamps/metadata
            copied_files.append((file_path, dest_file))
            print(f"Copied [{counter:4d}]: {os.path.basename(file_path)} -> {short_filename}")
            counter += 1  # only advance on success so ids stay contiguous
        except OSError as e:
            # Best-effort batch: one unreadable file must not abort the rest.
            print(f"Error copying {file_path}: {e}")

    print(f"\nSuccessfully copied {len(copied_files)} files to {dest_dir}")

    _write_mapping_file(dest_dir, copied_files)
    _write_model_summary(dest_dir, copied_files)


def _write_mapping_file(dest_dir, copied_files):
    """Write a human-readable short-name -> original-path index into *dest_dir*."""
    mapping_file = os.path.join(dest_dir, "detailed_file_mapping.txt")
    with open(mapping_file, "w") as f:
        f.write("Short Filename -> Original Path\n")
        f.write("=" * 80 + "\n")
        for orig, copied in copied_files:
            f.write(f"{os.path.basename(copied)} -> {orig}\n")
    print(f"Created detailed file mapping at: {mapping_file}")


def _write_model_summary(dest_dir, copied_files):
    """Write per-model copy counts inferred from each original path."""
    known_models = {'vc2', 'vc09', 'modelscope', 'latte1', 'vc10-large',
                    'sdxl-1.0', 'sd-2.1', 'sd-1.4', 'sd-3'}
    model_counts = {}
    for orig, _copied in copied_files:
        # First path component naming a known model wins.
        model = next((part for part in orig.split(os.sep) if part in known_models), None)
        if model:
            model_counts[model] = model_counts.get(model, 0) + 1

    summary_file = os.path.join(dest_dir, "summary_by_model.txt")
    with open(summary_file, "w") as f:
        f.write("Summary by Model\n")
        f.write("=" * 30 + "\n")
        for model, count in sorted(model_counts.items()):
            f.write(f"{model}: {count} files\n")
        f.write(f"\nTotal: {sum(model_counts.values())} files\n")
    print(f"Created summary at: {summary_file}")


if __name__ == "__main__":
    find_and_copy_chat_history_files()
|
data_pre/cot_gen/annotation_tool.py
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import gradio as gr
|
| 3 |
+
from typing import List, Dict, Any
|
| 4 |
+
import pandas as pd
|
| 5 |
+
|
| 6 |
+
class AnnotationInterface:
    """Web interface for annotating plan agent trajectories.

    Wraps a Gradio app that pages through collected exploration trajectories
    one at a time and records a human quality judgment for each.
    """

    def __init__(self, data_file: str):
        # Loaded trajectories, the cursor into them, and judgments collected so far.
        self.data = self.load_data(data_file)
        self.current_idx = 0
        self.annotations = []

    def load_data(self, file_path: str) -> List[Dict]:
        """Load trajectories for annotation.

        Expects a JSON object with a top-level "trajectories" list; returns
        an empty list when that key is absent.
        """
        with open(file_path, 'r') as f:
            data = json.load(f)
        return data.get("trajectories", [])

    def get_current_example(self) -> Dict:
        """Get current example for annotation, or {} when the cursor is out of range."""
        if 0 <= self.current_idx < len(self.data):
            return self.data[self.current_idx]
        return {}

    def format_trajectory(self, trajectory: List[Dict]) -> str:
        """Format trajectory for display as a plain-text outline.

        Each "explore" step renders as a numbered block; any other
        decision_type is rendered as the final summary.
        """
        formatted = []
        for step in trajectory:
            if step["decision_type"] == "explore":
                formatted.append(f"Step {step['step_number']}:")
                formatted.append(f"  Sub-aspect: {step['sub_aspect']}")
                formatted.append(f"  Tool: {step['tool']}")
                formatted.append(f"  Thought: {step['thought']}")
            else:
                formatted.append(f"Final Summary:")
                formatted.append(f"  {step.get('summary', '')}")
        return "\n".join(formatted)

    def annotate_current(
        self,
        quality_score: int,
        strategy_appropriate: bool,
        exploration_complete: bool,
        optimal_stopping: bool,
        improvements: str,
        alternative_paths: str
    ) -> Dict:
        """Record an annotation for the example at the current cursor.

        The annotation is appended to self.annotations and also returned.
        """
        annotation = {
            "example_idx": self.current_idx,
            "user_query": self.get_current_example().get("user_query", ""),
            "quality_score": quality_score,
            "strategy_appropriate": strategy_appropriate,
            "exploration_complete": exploration_complete,
            "optimal_stopping_point": optimal_stopping,
            "suggested_improvements": improvements,
            "alternative_exploration_paths": alternative_paths,
            "trajectory_length": len(self.get_current_example().get("trajectory", []))
        }

        self.annotations.append(annotation)
        return annotation

    def save_annotations(self, output_file: str = "annotations.json") -> str:
        """Save all annotations to *output_file*; returns a status string for the UI."""
        with open(output_file, 'w') as f:
            json.dump({
                "total_annotations": len(self.annotations),
                "annotations": self.annotations
            }, f, indent=2)
        return f"Saved {len(self.annotations)} annotations"

    def create_interface(self):
        """Create Gradio interface (caller invokes .launch() on the returned Blocks)."""
        with gr.Blocks() as interface:
            gr.Markdown("# Plan Agent Trajectory Annotation Tool")

            with gr.Row():
                # Left column: read-only view of the example being judged.
                with gr.Column(scale=2):
                    query_display = gr.Textbox(
                        label="User Query",
                        value=self.get_current_example().get("user_query", ""),
                        interactive=False
                    )
                    trajectory_display = gr.Textbox(
                        label="Exploration Trajectory",
                        value=self.format_trajectory(
                            self.get_current_example().get("trajectory", [])
                        ),
                        lines=20,
                        interactive=False
                    )

                # Right column: the annotation form.
                with gr.Column(scale=1):
                    gr.Markdown("### Annotation")
                    quality_score = gr.Slider(
                        1, 5, value=3, step=1,
                        label="Overall Quality (1-5)"
                    )
                    strategy_appropriate = gr.Checkbox(
                        label="Strategy Appropriate for Query?"
                    )
                    exploration_complete = gr.Checkbox(
                        label="Exploration Sufficiently Complete?"
                    )
                    optimal_stopping = gr.Checkbox(
                        label="Stopped at Optimal Point?"
                    )
                    improvements = gr.Textbox(
                        label="Suggested Improvements",
                        lines=3
                    )
                    alternative_paths = gr.Textbox(
                        label="Alternative Exploration Paths",
                        lines=3
                    )

            with gr.Row():
                prev_btn = gr.Button("Previous")
                next_btn = gr.Button("Next")
                save_btn = gr.Button("Save Annotations")

            progress = gr.Textbox(
                label="Progress",
                value=f"{self.current_idx + 1}/{len(self.data)}"
            )

            # Button actions.
            # "Next" saves the current form as an annotation before advancing;
            # "Previous" only moves the cursor and records nothing.
            def go_next(q, s, e, o, i, a):
                self.annotate_current(q, s, e, o, i, a)
                # Clamp at the last example instead of wrapping around.
                self.current_idx = min(self.current_idx + 1, len(self.data) - 1)
                example = self.get_current_example()
                return (
                    example.get("user_query", ""),
                    self.format_trajectory(example.get("trajectory", [])),
                    f"{self.current_idx + 1}/{len(self.data)}"
                )

            def go_prev():
                # Clamp at the first example.
                self.current_idx = max(self.current_idx - 1, 0)
                example = self.get_current_example()
                return (
                    example.get("user_query", ""),
                    self.format_trajectory(example.get("trajectory", [])),
                    f"{self.current_idx + 1}/{len(self.data)}"
                )

            next_btn.click(
                go_next,
                inputs=[quality_score, strategy_appropriate, exploration_complete,
                        optimal_stopping, improvements, alternative_paths],
                outputs=[query_display, trajectory_display, progress]
            )

            prev_btn.click(
                go_prev,
                outputs=[query_display, trajectory_display, progress]
            )

            # Reuses the progress textbox to show the "Saved N annotations" message.
            save_btn.click(
                lambda: self.save_annotations(),
                outputs=progress
            )

        return interface
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
# Usage
if __name__ == "__main__":
    # Create annotation interface over previously collected trajectories.
    annotator = AnnotationInterface("collected_trajectories.json")
    interface = annotator.create_interface()

    # Launch web interface; share=True publishes a temporary public Gradio URL.
    interface.launch(share=True)
interface.launch(share=True)
|
data_pre/cot_gen/augment_dataset.py
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import random
|
| 3 |
+
from typing import List, Dict, Any
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
class DatasetAugmenter:
    """Augment and enhance training dataset quality.

    Produces enriched originals, behavior-preserving "good" variations, and
    deliberately flawed negative examples from collected trajectories.
    NOTE: variation/negative generation draws from the global ``random``
    state, so outputs differ between runs unless the caller seeds it.
    """

    def __init__(self, base_dataset: str, annotations: str = None):
        # `annotations` is an optional file path; an empty dict is used when omitted.
        self.base_data = self.load_json(base_dataset)
        self.annotations = self.load_json(annotations) if annotations else {}

    def load_json(self, file_path: str) -> Dict:
        """Load JSON data from *file_path*."""
        with open(file_path, 'r') as f:
            return json.load(f)

    def augment_with_variations(self, trajectory: List[Dict]) -> List[List[Dict]]:
        """Create variations of a trajectory.

        Returns up to three variants: an early-stopped prefix, a version with
        alternative tool choices, and a version with two middle steps swapped.
        """
        variations = []

        # 1. Early stopping variation: a random prefix of at least two steps.
        if len(trajectory) > 3:
            early_stop = trajectory[:random.randint(2, len(trajectory)-1)]
            variations.append(early_stop)

        # 2. Different tool selection: swap each known tool for a related one.
        tool_varied = []
        alternative_tools = {
            "Color Binding": ["Color", "Overall Consistency"],
            "Shape Binding": ["Object Class", "Multiple Objects"],
            "Subject Consistency": ["Appearance Style", "Overall Consistency"],
            "Motion Smoothness": ["Dynamic Degree", "Temporal Style"]
        }

        for step in trajectory:
            # Shallow copy is enough: only the "tool" key is rewritten.
            step_copy = step.copy()
            if step.get("tool") in alternative_tools:
                step_copy["tool"] = random.choice(alternative_tools[step["tool"]])
            tool_varied.append(step_copy)
        # NOTE(review): appended even when no tool was replaced, so this variant
        # can be content-identical to the original — confirm intended.
        variations.append(tool_varied)

        # 3. Reordered exploration
        if len(trajectory) > 2:
            reordered = trajectory.copy()
            # Swap two distinct middle steps (first and last stay in place).
            if len(reordered) > 3:
                idx1, idx2 = random.sample(range(1, len(reordered)-1), 2)
                reordered[idx1], reordered[idx2] = reordered[idx2], reordered[idx1]
            variations.append(reordered)

        return variations

    def add_negative_examples(self, good_trajectory: List[Dict]) -> List[Dict]:
        """Create negative examples from good trajectories.

        Each returned item wraps a degraded trajectory with an "issue" label
        describing how it was degraded and quality="poor".
        """
        negative_examples = []

        # 1. Redundant exploration (same tool/aspect multiple times)
        if len(good_trajectory) > 2:
            redundant = good_trajectory.copy()
            # Duplicate a random step right after itself.
            # NOTE(review): the duplicate is the same dict object (aliased), so
            # mutating one later would affect both — confirm acceptable.
            dup_idx = random.randint(0, len(good_trajectory)-2)
            redundant.insert(dup_idx + 1, good_trajectory[dup_idx])
            negative_examples.append({
                "trajectory": redundant,
                "issue": "redundant_exploration",
                "quality": "poor"
            })

        # 2. Premature stopping: keep only the first two steps.
        if len(good_trajectory) > 3:
            premature = good_trajectory[:2]
            negative_examples.append({
                "trajectory": premature,
                "issue": "premature_stopping",
                "quality": "poor"
            })

        # 3. Wrong tool selection
        wrong_tool = []
        for step in good_trajectory:
            step_copy = step.copy()
            if random.random() < 0.3:  # 30% chance of wrong tool
                all_tools = ["Color Binding", "Shape Binding", "Texture Binding",
                             "Subject Consistency", "Motion Smoothness", "Object Class"]
                step_copy["tool"] = random.choice(all_tools)
            wrong_tool.append(step_copy)
        negative_examples.append({
            "trajectory": wrong_tool,
            "issue": "inappropriate_tools",
            "quality": "poor"
        })

        return negative_examples

    def enrich_with_reasoning(self, trajectory: List[Dict]) -> List[Dict]:
        """Add detailed reasoning to trajectory steps.

        Fills an "enriched_thought" field on a copy of every step using a
        randomly chosen template; the input steps are not mutated.
        """
        enriched = []

        reasoning_templates = {
            "explore": [
                "Based on the previous results showing {observation}, we need to explore {aspect} to {goal}",
                "The model performed {performance} on {previous_aspect}, so testing {current_aspect} will help determine {insight}",
                "To fully answer the user's question about {topic}, examining {aspect} with {tool} is essential"
            ],
            "summarize": [
                "After {n} exploration steps, we have sufficient evidence that {conclusion}",
                "The consistent pattern across {aspects} indicates {finding}, providing a complete answer",
                "Further exploration would be redundant as we've established {key_insight}"
            ]
        }

        for i, step in enumerate(trajectory):
            step_copy = step.copy()

            # Add enriched reasoning
            # str.format ignores keyword arguments a template does not use,
            # so every placeholder value is supplied regardless of template.
            if step.get("decision_type") == "explore":
                template = random.choice(reasoning_templates["explore"])
                step_copy["enriched_thought"] = template.format(
                    observation="strong performance" if i == 0 else "mixed results",
                    aspect=step.get("sub_aspect", "this aspect"),
                    goal="understand the model's capabilities",
                    performance="well" if random.random() > 0.5 else "poorly",
                    previous_aspect="basic scenarios",
                    current_aspect=step.get("sub_aspect", "complex scenarios"),
                    insight="the model's boundaries",
                    topic="model capabilities",
                    tool=step.get("tool", "evaluation tool")
                )
            else:
                template = random.choice(reasoning_templates["summarize"])
                step_copy["enriched_thought"] = template.format(
                    n=i,
                    conclusion="the model has clear strengths and limitations",
                    aspects="all tested dimensions",
                    finding="consistent behavior patterns",
                    key_insight="the model's capability boundaries"
                )

            enriched.append(step_copy)

        return enriched

    def compute_trajectory_metrics(self, trajectory: List[Dict]) -> Dict:
        """Compute quality metrics for a trajectory.

        Returns length, distinct-tool count, whether a summarize step is
        present, a redundancy count (repeated sub-aspects), and a heuristic
        completeness score based purely on length.
        """
        metrics = {
            "length": len(trajectory),
            "tool_diversity": len(set(step.get("tool", "") for step in trajectory)),
            "has_summary": any(step.get("decision_type") == "summarize" for step in trajectory),
            "redundancy_score": 0,
            "completeness_score": 0
        }

        # Check for redundancy: how many sub-aspect values repeat.
        aspects = [step.get("sub_aspect", "") for step in trajectory]
        metrics["redundancy_score"] = len(aspects) - len(set(aspects))

        # Estimate completeness (heuristic): mid-length trajectories score best.
        if metrics["length"] < 3:
            metrics["completeness_score"] = 0.3
        elif metrics["length"] > 8:
            metrics["completeness_score"] = 0.7
        else:
            metrics["completeness_score"] = 0.9

        return metrics

    def create_augmented_dataset(self, output_file: str) -> None:
        """Create fully augmented dataset and write it to *output_file* as JSON.

        For every base trajectory this emits: the enriched original, its
        "good" variations, and labeled negative examples, each with metrics.
        """
        augmented_data = {
            "version": "2.0",
            "original_examples": 0,
            "augmented_examples": 0,
            "negative_examples": 0,
            "examples": []
        }

        # Process each trajectory
        for item in self.base_data.get("trajectories", []):
            trajectory = item.get("trajectory", [])
            user_query = item.get("user_query", "")

            # Original example (enriched)
            enriched = self.enrich_with_reasoning(trajectory)
            metrics = self.compute_trajectory_metrics(enriched)

            augmented_data["examples"].append({
                "id": f"original_{augmented_data['original_examples']}",
                "user_query": user_query,
                "trajectory": enriched,
                "quality": "good",
                "metrics": metrics,
                "source": "original"
            })
            augmented_data["original_examples"] += 1

            # Variations
            variations = self.augment_with_variations(trajectory)
            for var in variations:
                enriched_var = self.enrich_with_reasoning(var)
                metrics = self.compute_trajectory_metrics(enriched_var)

                augmented_data["examples"].append({
                    "id": f"augmented_{augmented_data['augmented_examples']}",
                    "user_query": user_query,
                    "trajectory": enriched_var,
                    "quality": "good",
                    "metrics": metrics,
                    "source": "augmented"
                })
                augmented_data["augmented_examples"] += 1

            # Negative examples
            negatives = self.add_negative_examples(trajectory)
            for neg in negatives:
                metrics = self.compute_trajectory_metrics(neg["trajectory"])

                augmented_data["examples"].append({
                    "id": f"negative_{augmented_data['negative_examples']}",
                    "user_query": user_query,
                    "trajectory": neg["trajectory"],
                    "quality": neg["quality"],
                    "issue": neg["issue"],
                    "metrics": metrics,
                    "source": "negative"
                })
                augmented_data["negative_examples"] += 1

        # Save augmented dataset
        with open(output_file, 'w') as f:
            json.dump(augmented_data, f, indent=2)

        print(f"Created augmented dataset with:")
        print(f"  Original examples: {augmented_data['original_examples']}")
        print(f"  Augmented examples: {augmented_data['augmented_examples']}")
        print(f"  Negative examples: {augmented_data['negative_examples']}")
        print(f"  Total examples: {len(augmented_data['examples'])}")
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
# Usage
if __name__ == "__main__":
    # Build the augmented dataset from previously collected trajectories.
    augmenter = DatasetAugmenter("collected_trajectories.json")
    augmenter.create_augmented_dataset("augmented_training_data.json")
|
data_pre/cot_gen/collect_trajectories.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import glob
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
from typing import List, Dict, Any
|
| 6 |
+
|
| 7 |
+
class TrajectoryCollector:
    """Collect and process evaluation trajectories for training data.

    Recursively scans one or more result directories for ``*.json`` files,
    parses each one, and normalizes the evaluation run into a structured
    trajectory record suitable for downstream training-data generation.
    """

    def __init__(self, base_dirs: List[str]):
        # Directories to scan recursively for result JSON files.
        self.base_dirs = base_dirs
        # Accumulated trajectory records across all collected directories.
        self.trajectories: List[Dict] = []

    def extract_trajectory_data(self, eval_results: List[Any]) -> List[Dict]:
        """Extract structured trajectory steps from raw evaluation results.

        Each dict entry is classified as an exploration step (has a
        "Sub-aspect" key) or a final summary (has an "Analysis" key).
        Non-dict entries (e.g. the leading user-query string) and dicts
        matching neither schema are skipped.
        """
        trajectory_steps = []

        for i, step in enumerate(eval_results):
            # Fix: only `str` entries used to be skipped, so any other
            # non-dict element (int, None, list of ints, ...) crashed on
            # the `in` membership test below.
            if not isinstance(step, dict):
                continue

            if "Sub-aspect" in step:  # T2I CompBench or VBench format
                step_data = {
                    "step_number": i,
                    "decision_type": "explore",
                    "sub_aspect": step.get("Sub-aspect", ""),
                    "tool": step.get("Tool", ""),
                    "thought": step.get("Thought", ""),
                    "eval_results": step.get("eval_results", {})
                }
            elif "Analysis" in step:  # Final summary
                step_data = {
                    "step_number": i,
                    "decision_type": "summarize",
                    "thought": step.get("Thought", ""),
                    "analysis": step.get("Analysis", ""),
                    "summary": step.get("Summary", "")
                }
            else:
                continue

            trajectory_steps.append(step_data)

        return trajectory_steps

    def collect_from_directory(self, directory: str) -> List[Dict]:
        """Collect all trajectories found under *directory* (recursive)."""
        collected_data = []

        # Find all result JSON files.
        json_files = glob.glob(os.path.join(directory, "**/*.json"), recursive=True)

        for json_file in json_files:
            try:
                with open(json_file, 'r') as f:
                    data = json.load(f)

                # Fix: only non-empty list payloads are valid result files.
                # Previously `data[0]` raised on empty lists and dicts and
                # merely fell into the broad except, printing a spurious
                # error; now such files are skipped cleanly.
                if not isinstance(data, list) or not data:
                    continue

                # Extract user query (usually first element).
                user_query = data[0] if isinstance(data[0], str) else ""

                # Extract trajectory steps.
                trajectory = self.extract_trajectory_data(data)

                if trajectory:
                    collected_data.append({
                        "source_file": json_file,
                        "user_query": user_query,
                        "trajectory": trajectory,
                        "total_steps": len(trajectory),
                        "timestamp": os.path.getmtime(json_file)
                    })
            except Exception as e:
                # Best-effort: a single corrupt file must not abort the scan.
                print(f"Error processing {json_file}: {e}")

        return collected_data

    def collect_all(self) -> None:
        """Collect trajectories from all base directories."""
        for base_dir in self.base_dirs:
            if os.path.exists(base_dir):
                print(f"Collecting from {base_dir}...")
                data = self.collect_from_directory(base_dir)
                self.trajectories.extend(data)
                print(f"  Found {len(data)} trajectories")

    def save_dataset(self, output_file: str) -> None:
        """Save collected trajectories (with metadata header) to *output_file*."""
        with open(output_file, 'w') as f:
            json.dump({
                "collection_date": datetime.now().isoformat(),
                "total_trajectories": len(self.trajectories),
                "trajectories": self.trajectories
            }, f, indent=2)
        print(f"Saved {len(self.trajectories)} trajectories to {output_file}")
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# Example invocation: gather trajectories from all known result folders
# and persist them as one dataset file.
if __name__ == "__main__":
    result_dirs = [
        "./eval_t2i_comp_results/",
        "./eval_vbench_results/",
        "./open_domain_results/",
    ]
    collector = TrajectoryCollector(result_dirs)
    collector.collect_all()
    collector.save_dataset("collected_trajectories.json")
|
data_pre/cot_gen/example.jsonl
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
{
|
| 3 |
+
"round_id": 0,
|
| 4 |
+
"plan_agent_input": "...",
|
| 5 |
+
"plan_agent_output": [{
|
| 6 |
+
"thought": "...",
|
| 7 |
+
"aspect": "Subject‑consistency"
|
| 8 |
+
}],
|
| 9 |
+
"promptgen_input": "...",
|
| 10 |
+
"promptgen_output": {
|
| 11 |
+
"thought": "...",
|
| 12 |
+
"prompt": "Generate a 3‑sec video of ..."
|
| 13 |
+
},
|
| 14 |
+
"eval_results": {...}
|
| 15 |
+
}
|
data_pre/cot_gen/generate_training_data.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import random
|
| 3 |
+
from typing import List, Dict, Any
|
| 4 |
+
from base_agent import BaseAgent
|
| 5 |
+
|
| 6 |
+
class TrainingDataGenerator:
    """Generate diverse synthetic training data for the plan agent.

    Combines templated user queries with plausible exploration sequences
    and per-step decision points, producing examples that cover capability
    checks, comparisons, boundary finding, and quality assessment.
    """

    def __init__(self):
        # Query templates grouped by evaluation intent.
        self.query_templates = self.load_query_templates()
        # Tool names available for the T2I CompBench setting.
        self.tools_t2i = ["Color Binding", "Shape Binding", "Texture Binding", "Non Spatial"]
        # Tool names available for the VBench setting.
        self.tools_vbench = [
            "Subject Consistency", "Background Consistency", "Motion Smoothness",
            "Aesthetic Quality", "Imaging Quality", "Object Class", "Human Action",
            "Color", "Spatial Relationship", "Scene"
        ]

    def load_query_templates(self) -> Dict[str, List[str]]:
        """Return the query template strings for each query category."""
        return {
            "capability_check": [
                "Can the model generate {object} with {attribute}?",
                "How well does the model handle {scenario}?",
                "Is the model capable of creating {complex_scene}?"
            ],
            "comparison": [
                "How does the model perform on {task1} vs {task2}?",
                "What are the differences in generating {object1} compared to {object2}?"
            ],
            "boundary_finding": [
                "What are the limits of the model's ability to {capability}?",
                "How complex can {scenario} be before the model fails?",
                "What is the maximum number of {elements} the model can handle?"
            ],
            "quality_assessment": [
                "How consistent is the model in generating {aspect}?",
                "What is the quality of {feature} in the generated outputs?",
                "How accurate is the model's {binding_type} binding?"
            ]
        }

    def generate_diverse_queries(self, n: int = 100) -> List[Dict]:
        """Generate *n* evaluation queries.

        Returns a list of dicts with keys ``query`` (the filled-in template)
        and ``type`` (the template category). Note: the previous annotation
        claimed ``List[str]`` but dicts were always returned.
        """
        queries = []

        # Substitution values for every placeholder used by the templates.
        substitutions = {
            "object": ["cats", "cars", "buildings", "people", "landscapes"],
            "attribute": ["specific colors", "complex textures", "unusual shapes"],
            "scenario": ["multi-object scenes", "dynamic actions", "abstract concepts"],
            "complex_scene": ["crowded marketplaces", "underwater scenes", "futuristic cities"],
            "task1": ["realistic portraits", "abstract art"],
            "task2": ["photorealistic landscapes", "cartoon characters"],
            # Fix: the comparison templates reference {object1}/{object2},
            # which were previously missing from this table and therefore
            # survived unfilled in generated queries.
            "object1": ["cats", "cars", "buildings"],
            "object2": ["people", "landscapes", "cartoon characters"],
            "capability": ["generate multiple objects", "maintain consistency", "follow complex prompts"],
            "elements": ["objects", "people", "colors", "textures"],
            "aspect": ["human faces", "animal poses", "architectural details"],
            "feature": ["motion", "lighting", "composition"],
            "binding_type": ["color", "shape", "texture", "spatial"]
        }

        for _ in range(n):
            template_type = random.choice(list(self.query_templates.keys()))
            template = random.choice(self.query_templates[template_type])

            # Fill in every placeholder present in the chosen template.
            query = template
            for key, values in substitutions.items():
                if f"{{{key}}}" in query:
                    query = query.replace(f"{{{key}}}", random.choice(values))

            queries.append({
                "query": query,
                "type": template_type
            })

        return queries

    def generate_exploration_sequence(self, query: str, query_type: str) -> List[Dict]:
        """Generate a plausible exploration sequence for a query.

        The shape of the sequence depends only on *query_type*; *query*
        is kept in the signature for interface stability.
        """
        sequence = []

        if query_type == "capability_check":
            # Start simple, increase complexity.
            complexities = ["simple", "moderate", "complex", "very complex"]
            for i, complexity in enumerate(complexities):
                sequence.append({
                    "step": i + 1,
                    "sub_aspect": f"Testing with {complexity} scenarios",
                    "tool": random.choice(self.tools_t2i + self.tools_vbench),
                    "strategy": "depth-first"
                })

        elif query_type == "comparison":
            # Test both aspects separately, then together.
            aspects = ["first aspect", "second aspect", "combined comparison"]
            for i, aspect in enumerate(aspects):
                sequence.append({
                    "step": i + 1,
                    "sub_aspect": f"Evaluating {aspect}",
                    "tool": random.choice(self.tools_t2i + self.tools_vbench),
                    "strategy": "breadth-first"
                })

        elif query_type == "boundary_finding":
            # Progressive stress testing at increasing difficulty percentiles.
            stress_levels = [10, 50, 90, 99]
            for i, level in enumerate(stress_levels):
                sequence.append({
                    "step": i + 1,
                    "sub_aspect": f"Testing at {level}th percentile difficulty",
                    "tool": random.choice(self.tools_t2i + self.tools_vbench),
                    "strategy": "depth-first"
                })

        else:  # quality_assessment
            # Cover multiple facets of output quality.
            quality_aspects = ["consistency", "accuracy", "diversity", "edge cases"]
            for i, aspect in enumerate(quality_aspects):
                sequence.append({
                    "step": i + 1,
                    "sub_aspect": f"Assessing {aspect}",
                    "tool": random.choice(self.tools_t2i + self.tools_vbench),
                    "strategy": "breadth-first"
                })

        return sequence

    def create_training_example(self, query_data: Dict) -> Dict:
        """Create a complete training example from a query dict.

        The example carries the query, a high-level exploration plan, the
        step-by-step sequence, and per-step explore/summarize decisions.
        """
        query = query_data["query"]
        query_type = query_data["type"]

        exploration = self.generate_exploration_sequence(query, query_type)

        example = {
            "user_query": query,
            "query_type": query_type,
            "exploration_plan": {
                "strategy": "depth-first" if "boundary" in query_type else "breadth-first",
                "expected_steps": len(exploration),
                "focus_areas": [step["sub_aspect"] for step in exploration]
            },
            "exploration_sequence": exploration,
            "decision_points": []
        }

        # Every step but the last decides "explore"; the last summarizes.
        for i, step in enumerate(exploration):
            decision = {
                "step": i + 1,
                "context": {
                    "previous_steps": exploration[:i],
                    "current_observations": f"Simulated results from step {i}"
                },
                "decision": "explore" if i < len(exploration) - 1 else "summarize",
                "reasoning": f"Need to explore {step['sub_aspect']} to fully answer the query"
            }
            example["decision_points"].append(decision)

        return example

    def generate_dataset(self, n_examples: int = 1000) -> List[Dict]:
        """Generate a complete training dataset of *n_examples* examples."""
        queries = self.generate_diverse_queries(n_examples)
        return [self.create_training_example(query_data) for query_data in queries]
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
# Script entry point: synthesize the plan-agent training corpus and dump
# it as a versioned JSON payload.
if __name__ == "__main__":
    generator = TrainingDataGenerator()

    print("Generating training dataset...")
    dataset = generator.generate_dataset(1000)

    payload = {
        "version": "1.0",
        "total_examples": len(dataset),
        "examples": dataset,
    }
    with open("plan_agent_training_data.json", "w") as f:
        json.dump(payload, f, indent=2)

    print(f"Generated {len(dataset)} training examples")
|
data_pre/cot_gen/main.py
ADDED
|
@@ -0,0 +1,478 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
import argparse
|
| 5 |
+
from typing import Dict, List, Optional
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
from autogen_agentchat.agents import AssistantAgent
|
| 8 |
+
from autogen_agentchat.messages import TextMessage
|
| 9 |
+
from autogen_agentchat.ui import Console
|
| 10 |
+
from autogen_core import CancellationToken
|
| 11 |
+
from autogen_ext.models.openai import AzureOpenAIChatCompletionClient
|
| 12 |
+
from autogen_core.models import ModelFamily
|
| 13 |
+
from prompts import thought_prompt, sys_prompt, identity_prompt
|
| 14 |
+
from tools import *
|
| 15 |
+
from utils import extract_json, identity_mapping_dict, setup_logging_and_config, process_qa, rag_url_dict
|
| 16 |
+
from datetime import datetime
|
| 17 |
+
|
| 18 |
+
# max steps for the agent to generate the answer
|
| 19 |
+
MAX_STEPS = 20
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class AgentResponse:
    """Container pairing an agent's reasoning with its final reply."""

    def __init__(self, thoughts: str, response: str):
        # Free-form chain-of-thought text produced by the agent.
        self.thoughts = thoughts
        # The user-facing answer text.
        self.response = response
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def parse_arguments() -> argparse.Namespace:
    """Build and parse the command-line options for the EgoLife QA agent."""
    parser = argparse.ArgumentParser(
        description="Run EgoLife QA agent with chain-of-thought reasoning"
    )

    # Model configuration
    parser.add_argument("--model", type=str, default="gpt-4.1",
                        help="Model to use for the agent")
    parser.add_argument("--api_version", type=str, default="2024-09-01-preview",
                        help="API version for the model")

    # Data configuration
    parser.add_argument("--data_path", type=str, default="./egor1-bench/QA-egolife/",
                        help="Path to the data directory")
    parser.add_argument("--identity", type=str, default="A1",
                        help="Identity to use for the agent")

    # Output configuration
    parser.add_argument("--result_dir", type=str, default="./results",
                        help="Directory to save results")
    parser.add_argument("--log_dir", type=str, default="./logs",
                        help="Directory to save logs")
    parser.add_argument("--cache_dir", type=str, default="./cache",
                        help="Directory for caching")

    # Processing options
    parser.add_argument("--explicit_answer", action="store_true",
                        help="Use explicit answer termination")
    parser.add_argument("--observation_type", type=str, default="all_actions",
                        choices=["single", "all", "all_actions", "null"],
                        help="Type of observation to include in prompts")

    # Resume and specific data options
    parser.add_argument("--resume", action="store_true",
                        help="Resume processing from error files")
    parser.add_argument("--gen_specific_data", action="store_true",
                        help="Process only specific data IDs")
    parser.add_argument("--specific_data_path", type=str,
                        default="./data_statistics/error_list_results_aobs_gpt-41_A1.txt",
                        help="Path to specific data list file (.txt or .json)")

    return parser.parse_args()
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def load_json_data(file_path: str) -> List[Dict]:
    """Read and parse a JSON file.

    Raises:
        ValueError: if the file is missing, not valid JSON, or badly encoded.
    """
    try:
        with open(file_path, "r", encoding="utf-8") as handle:
            parsed = json.load(handle)
    except (json.JSONDecodeError, FileNotFoundError, UnicodeDecodeError) as e:
        raise ValueError(f"Failed to load {file_path}: {e}")
    return parsed
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def load_specific_data_ids(file_path: str) -> List[int]:
    """Load specific data IDs from a text or JSON file.

    ``.txt`` files may hold a comma-separated list, optionally enclosed in
    brackets (e.g. ``[1, 2, 3]`` or ``4,5``); ``.json`` files must hold a
    list of objects with an ``"ID"`` field.

    Raises:
        ValueError: for any other file extension.
    """
    if file_path.endswith(".txt"):
        # Fix: the previous `f.readlines()[0]` raised IndexError on an
        # empty file and silently ignored every line after the first.
        # Reading the whole file keeps the old single-line behavior and
        # additionally tolerates empty and multi-line inputs.
        with open(file_path, "r", encoding="utf-8") as f:
            data_str = f.read().strip()
        # Handle both comma-separated and bracket-enclosed formats.
        data_str = data_str.strip("[]")
        return [
            int(x.strip())
            for x in data_str.replace("\n", ",").split(",")
            if x.strip()
        ]

    elif file_path.endswith(".json"):
        error_data = load_json_data(file_path)
        return [int(item["ID"]) for item in error_data]

    else:
        raise ValueError(f"Unsupported file format: {file_path}")
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def load_resume_data_ids(identity: str) -> List[int]:
    """Collect the data IDs recorded in the per-identity error files.

    Combines the "NA" error list with the "no answer" list (keeping only
    entries whose CoT is shorter than MAX_STEPS) and de-duplicates.
    """
    pending: List[int] = []

    # NA errors
    na_error_path = f"data_gen/errors/error_list_na_{identity}.json"
    if os.path.exists(na_error_path):
        try:
            na_errors = load_json_data(na_error_path)
            pending.extend([int(d["ID"]) for d in na_errors])
        except Exception as e:
            print(f"Warning: Could not load NA errors from {na_error_path}: {e}")

    # No-answer errors (incomplete processing)
    no_answer_path = f"data_gen/errors/error_list_no_answer_{identity}.json"
    if os.path.exists(no_answer_path):
        try:
            no_answer_errors = load_json_data(no_answer_path)
            incomplete = [
                int(error_d["ID"])
                for error_d in no_answer_errors
                if len(error_d.get("cot", [])) < MAX_STEPS
            ]
            pending.extend(incomplete)
        except Exception as e:
            print(f"Warning: Could not load no-answer errors from {no_answer_path}: {e}")

    # Remove duplicates.
    return list(set(pending))
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def filter_data_by_ids(data: List[Dict], target_ids: List[int]) -> List[Dict]:
    """Keep only the entries whose "ID" appears in *target_ids*."""
    wanted = set(target_ids)
    return [entry for entry in data if entry["ID"] in wanted]
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def setup_model_client(args: argparse.Namespace) -> AzureOpenAIChatCompletionClient:
    """Build the Azure OpenAI chat client for the configured model."""
    endpoint, deployment, subscription_key = setup_logging_and_config(args.model)

    if args.model != "gpt-4.1":
        # Generic path: honor the model/API version from the CLI.
        return AzureOpenAIChatCompletionClient(
            azure_deployment=deployment,
            azure_endpoint=endpoint,
            model=args.model,
            api_version=args.api_version,
            api_key=subscription_key,
        )

    # gpt-4.1 requires an explicit model_info block and a pinned API version.
    return AzureOpenAIChatCompletionClient(
        azure_deployment=deployment,
        azure_endpoint=endpoint,
        model="gpt-41",
        api_version="2025-01-01-preview",
        api_key=subscription_key,
        model_info={
            "family": ModelFamily.GPT_41,
            "function_calling": True,
            "json_output": True,
            "structured_output": True,
            "vision": False,
        }
    )
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def get_tools(args: argparse.Namespace) -> List:
    """Return the agent's tool set, choosing the termination variant by flag."""
    terminator = terminate_explicit if args.explicit_answer else terminate
    return [rag, video_llm, vlm, terminator]
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def get_system_prompt(args: argparse.Namespace) -> str:
    """Compose the system prompt with the identity line appended."""
    identity_tag = f"{args.identity}_{identity_mapping_dict[args.identity]}"
    return sys_prompt + "\n" + identity_prompt.format(identity=identity_tag)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def create_resume_prompt(dp: Dict, base_prompt: List[TextMessage]) -> List[TextMessage]:
    """Extend *base_prompt* with the stored CoT so the agent can resume."""
    prompt = base_prompt.copy()

    if not dp.get("cot"):
        # Nothing recorded yet — resume from the plain task prompt.
        return prompt

    prompt.append(TextMessage(
        source="assistant",
        content=f'Previous observations: {dp["cot"]}'
    ))
    continuation = (
        "Now you are given the previous actions and observations you have made before, "
        "continue to try your best to answer the question using different tools. "
        f"You must provide an answer to the question before step {MAX_STEPS}."
    )
    prompt.append(TextMessage(source="user", content=continuation))

    return prompt
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
def update_prompt_with_observation(
    prompt: List[TextMessage],
    observation: Dict,
    observation_type: str
) -> List[TextMessage]:
    """Fold the newest observation into the prompt per *observation_type*.

    "single" rebuilds the prompt around the latest observation only;
    "all" appends the full observation; "all_actions" appends just the
    tool call; "null" leaves the prompt untouched.
    """
    if observation_type == "single":
        # Keep only the original task message plus the newest observation.
        return [
            prompt[0],
            TextMessage(source="assistant", content=f'Previous observations: {observation}'),
        ]

    if observation_type == "all":
        prompt.append(TextMessage(
            source="assistant",
            content=f'Previous observations (step-{observation["step"]}): {observation}'
        ))
    elif observation_type == "all_actions":
        prompt.append(TextMessage(
            source="assistant",
            content=f'Previous actions (step-{observation["step"]}): {observation["tool"]}'
        ))
    # observation_type == "null": intentionally append nothing.

    return prompt
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
async def process_single_qa(
    qa: Dict,
    agent: AssistantAgent,
    args: argparse.Namespace,
    result_dir: str
) -> Optional[Dict]:
    """Process a single QA item with the agent.

    Runs the agent's tool-use loop for up to MAX_STEPS, appending one
    observation per step to ``dp["cot"]`` and persisting the result to
    ``<result_dir>/<ID>.json``. Supports resuming from a previously saved
    partial run when ``args.resume`` is set.

    Returns:
        None on success; on an agent-streaming failure, a dict with the
        item id, the prompt contents, the error string, and the step at
        which the failure occurred.
    """
    dp = process_qa(qa, args.explicit_answer)
    dp_path = os.path.join(result_dir, f"{dp['ID']}.json")

    # Handle existing files: without --resume an existing result is replaced.
    if os.path.exists(dp_path) and not args.resume:
        print(f"Overwriting {dp['ID']}")

    # With --resume, reload the previously saved state (including its CoT).
    if args.resume and os.path.exists(dp_path):
        print(f"Resuming {dp['ID']}")
        with open(dp_path, "r", encoding="utf-8") as f:
            dp = json.load(f)

    # Create initial prompt: the question plus the chain-of-thought framing.
    base_prompt = [TextMessage(
        content=dp["question"] + "\n\n" + thought_prompt,
        source="user"
    )]

    # Set up for resume if needed: drop the last (presumed incomplete) CoT
    # step and rebuild the prompt from the remaining history.
    if args.resume and dp.get("cot") and len(dp["cot"]) > 0:
        step = len(dp["cot"]) - 1
        print(f"Resuming from {dp['ID']} at step {step}")
        dp["cot"] = dp["cot"][:-1]  # Remove last incomplete step
        prompt = create_resume_prompt(dp, base_prompt)
    else:
        step = 0
        prompt = base_prompt

    # Main processing loop: one agent turn per iteration, capped at MAX_STEPS.
    while step < MAX_STEPS:
        print(f"Step: {step}")
        try:
            result = await Console(agent.run_stream(
                task=prompt,
                cancellation_token=CancellationToken()
            ))
        except Exception as e:
            # Streaming failure: report the context so the caller can log
            # the item into an error list for a later --resume pass.
            print(f"Error at step {step}: {e}")
            return {
                "id": dp["ID"],
                "prompt": [msg.content for msg in prompt],
                "error": str(e),
                "step": step
            }

        step += 1
        messages = result.messages

        # Extract information from messages: the agent's thought, the first
        # tool-call request, and the tool-result summary (each may be absent).
        thought = None
        tool_call = None
        tool_summary = None

        for message in messages:
            if message.type == "ThoughtEvent":
                thought = message.content
            elif message.type == "ToolCallRequestEvent":
                tool_call = message.content[0]
            elif message.type == "ToolCallSummaryMessage":
                tool_summary = message.content

        # Handle termination: a terminate* tool call carries the final
        # answer in its JSON arguments and ends the loop.
        if tool_call and "terminate" in tool_call.name.lower():
            observation = {
                "step": step,
                "thought": thought,
                "answer": extract_json(tool_call.arguments)["answer"]
            }
            dp['cot'].append(observation)
            break

        # Handle regular tool usage: record the call plus its summary and
        # feed the observation back into the prompt for the next turn.
        else:
            observation = {
                "step": step,
                "thought": thought,
                "tool": {
                    "id": tool_call.id if tool_call else None,
                    "name": tool_call.name if tool_call else None,
                    "arguments": tool_call.arguments if tool_call else None
                },
                "observation": tool_summary
            }

            dp['cot'].append(observation)
            prompt = update_prompt_with_observation(prompt, observation, args.observation_type)

    # Save results (also saved when MAX_STEPS is exhausted without an answer).
    with open(dp_path, "w", encoding="utf-8") as f:
        json.dump(dp, f, indent=4)

    return None  # No error
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
async def main() -> None:
    """Main function to orchestrate the QA processing.

    Sets up the environment for the configured identity, loads the EgoLife
    QA data (full run, resume run, or a specific-ID subset), builds a fresh
    AssistantAgent per question, runs each question through
    process_single_qa, and writes any per-question errors to the log dir.
    """
    args = parse_arguments()

    # Set up environment: env vars are only filled in when not already set,
    # so external configuration wins over CLI defaults.
    os.environ["LOG_DIR"] = args.log_dir
    if os.environ.get("IDENTITY") is None:
        os.environ["IDENTITY"] = args.identity
    if os.environ.get("RAG_URL") is None:
        # Fall back to the per-identity RAG endpoint table.
        os.environ["RAG_URL"] = rag_url_dict[args.identity]
    assert os.environ["RAG_URL"] is not None, "RAG_URL is not set"
    cache_dir = os.environ.get("CACHE_DIR", args.cache_dir)
    os.makedirs(args.result_dir, exist_ok=True)
    os.makedirs(cache_dir, exist_ok=True)
    os.makedirs(args.log_dir, exist_ok=True)

    # Load data based on mode
    data_path = os.path.join(
        args.data_path,
        f"EgoLifeQA_{args.identity}_{identity_mapping_dict[args.identity]}.json"
    )

    print(f"Current identity: {args.identity}")

    try:
        if args.gen_specific_data:
            print("Loading specific data...")
            # Use specific_data_path or fall back to specific_data_list for compatibility
            specific_path = args.specific_data_path
            if not os.path.exists(specific_path) and hasattr(args, 'specific_data_list'):
                specific_path = args.specific_data_list

            if os.path.exists(specific_path):
                target_ids = load_specific_data_ids(specific_path)
            else:
                # Fallback to default path format
                default_path = f"./data_statistics/error_list_{args.identity}.txt"
                target_ids = load_specific_data_ids(default_path)

            all_data = load_json_data(data_path)
            egolife_qa_data = filter_data_by_ids(all_data, target_ids)
            print(f"Loaded {len(egolife_qa_data)} specific items")

        elif args.resume:
            print("Loading resume data...")
            target_ids = load_resume_data_ids(args.identity)
            all_data = load_json_data(data_path)
            egolife_qa_data = filter_data_by_ids(all_data, target_ids)
            print(f"Loaded {len(egolife_qa_data)} items for resume")

        else:
            print(f"Loading all data from: {data_path}")
            egolife_qa_data = load_json_data(data_path)
            print(f"Loaded {len(egolife_qa_data)} items")

    except Exception as e:
        # Any load failure aborts the whole run; nothing has been processed yet.
        print(f"Error loading data: {e}")
        return

    if not egolife_qa_data:
        print("No data to process.")
        return

    # Set up model and agent components
    model_client = setup_model_client(args)
    tools = get_tools(args)
    sys_prompt_text = get_system_prompt(args)

    # Process data
    errors = []

    for qa in tqdm(egolife_qa_data, desc="Processing QA items"):
        # Create fresh agent for each QA to avoid state issues
        agent = AssistantAgent(
            name="egolife_qa_agent",
            model_client=model_client,
            tools=tools,
            system_message=sys_prompt_text,
        )

        error = await process_single_qa(qa, agent, args, args.result_dir)
        if error:
            errors.append(error)

    # Save error list if any
    if errors:
        error_file = os.path.join(args.log_dir, f"error_list_{args.identity}.json")
        with open(error_file, "w", encoding="utf-8") as f:
            json.dump(errors, f, indent=4)
        print(f"Saved {len(errors)} errors to {error_file}")
    else:
        print("No errors encountered!")

    # Cleanup
    print("Processing complete!")
    if cache_dir and os.path.exists(cache_dir):
        try:
            # os.rmdir only removes an *empty* directory; a non-empty cache is
            # deliberately left in place (OSError is swallowed below).
            os.rmdir(cache_dir)
        except OSError:
            pass  # Directory not empty or doesn't exist

    await model_client.close()


if __name__ == "__main__":
    asyncio.run(main())
|
data_pre/cot_gen/prompts.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Prompts used for the vanilla A-B-C-D multiple-choice QA task.

# The system prompt asks the agent to weigh the cost/frequency of each tool;
# a single video observation is capped at 10 minutes.
sys_prompt = """
[BEGIN OF GOAL]
You are an expert AI assistant specializing in analyzing human behavior and reasoning from egocentric video descriptions. You will be provided with a list of useful tools to help in reasoning the task, and your goal is to solve the user’s question. The user’s question is following the format: Question: <question> <timestamp> Options: <options>. You can either rely on your own capabilities or perform actions with external tools to help you. You should consider both the frequency and cost of each tool to make the best decision.
[END OF GOAL]

[BEGIN OF FORMAT INSTRUCTIONS]
When answering questions:
1. You will be provided with previous actions you have taken, based on these actions, think step-by-step about how to approach the problem.
2. Show your reasoning process clearly before providing your next action.
3. The video observation length is 10-min max.
4. For visual questions, use video_llm and vlm to explore the visual context.
5. For temporal questions, use RAG to explore the context before and after the event.
6. Only use the terminate tool after you have thoroughly explored the question with multiple tools.
[END OF FORMAT INSTRUCTIONS]

[BEGIN OF HINTS]
1. All tools provided are crucial to the solvement of the question. You MUST exploit the usage of all tools before answering the question.
2. You may want to use the same tool multiple times with different arguments to explore the problem from different angles, if needed.
3. Make a balance between the cost and the frequency of the tools.
4. Usually, solving a question requires over 5~10 steps of reasoning, and follows a hierarchical calling structure: rag => video_llm => vlm.
5. Do not use the terminate tool too early. Instead, try to explore the question with the available tools, and only use the terminate tool when you are confident enough or have considered all the options.
[END OF HINTS]

Always structure your responses with your thought process first, followed by any tool calls.

"""


# Instructs the agent to emit its reasoning before every tool call and to
# stick to that plan when making the call.
thought_prompt = "Think before you act. Think step-by-step about what information you need and which tool to use, then execute your plan exactly as reasoned without deviation. Output your thought process before using the tool, and you must strictly follow your thought process for the tool call."

# Filled with the camera-wearer identity, e.g. identity_prompt.format(identity=...).
identity_prompt = "Currently, you are under the view of: {identity}"
|
data_pre/postprocess.py
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import json
|
| 4 |
+
import glob
|
| 5 |
+
import argparse
|
| 6 |
+
from datasets import load_dataset, concatenate_datasets
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import shutil
|
| 9 |
+
import chardet
|
| 10 |
+
import ast
|
| 11 |
+
import transformers
|
| 12 |
+
|
| 13 |
+
# Add parent directory to Python path
|
| 14 |
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
| 15 |
+
from eval_agent.system_prompts import sys_prompts
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
vbench_dimention_df = pd.read_csv("eval_agent/vbench_dimension_scores.tsv", sep="\t")
|
| 19 |
+
t2i_dimention_df = pd.read_csv("eval_agent/t2i_dimension_scores.tsv", sep="\t")
|
| 20 |
+
|
| 21 |
+
# Templates for different components.
# Alpaca SFT record skeleton: each field is itself a format string that
# format_template() fills in.
alpaca_template = {
    "instruction": "{instruction}",
    "input": "{input}",
    "output": "{output}",
    "system": "{system}"
}


# XML-ish tags wrapping the parts of a model turn in the serialized trace.
thinking_template = "<think>{thinking}</think>"
tool_template = "<tool>{tool}</tool>"

observation_template = "<information>{information}</information>"

analysis_template = "<analysis>{analysis}</analysis>"
summary_template = "<summary>{summary}</summary>"

# Global counter for tracking data
global_counter = 0  # total SFT examples emitted across all processed files
length_counter = 0  # NOTE(review): never incremented in this file — appears unused
|
| 41 |
+
|
| 42 |
+
def format_subaspect(sub_aspect):
    """Render a short sentence announcing which sub-aspect is evaluated next.

    One of several equivalent phrasings is picked at random so the generated
    training data does not repeat a single fixed sentence.
    """
    import random

    phrasings = (
        "I will evaluate the model's sub-aspect: {sub_aspect}.",
        "I will focus on the {sub_aspect} sub-aspect of the model.",
        "Let me assess the {sub_aspect} sub-aspect of this model.",
        "I need to examine the model's {sub_aspect} sub-aspect.",
        "Now I will analyze the {sub_aspect} sub-aspect dimension.",
        "I'll investigate the {sub_aspect} sub-aspect quality of the model.",
        "Time to evaluate the {sub_aspect} sub-aspect performance.",
        "I should check the model's {sub_aspect} sub-aspect capabilities.",
    )
    return random.choice(phrasings).format(sub_aspect=sub_aspect)
|
| 57 |
+
|
| 58 |
+
def format_summary(analysis, summary):
    """Combine an analysis and a summary into the two-line report format."""
    return "Analysis: {}\nSummary: {}".format(analysis, summary)
|
| 61 |
+
|
| 62 |
+
def load_data(file_path):
    """Read *file_path* as UTF-8 JSON and return the parsed object."""
    with open(file_path, "r", encoding="utf-8") as handle:
        parsed = json.load(handle)
    return parsed
|
| 66 |
+
|
| 67 |
+
def format_template(template, **kwargs):
    """Fill a template with *kwargs*.

    The template may be a single format string, or a dict whose string values
    are each treated as format strings; non-string dict values are passed
    through unchanged.
    """
    if not isinstance(template, dict):
        return template.format(**kwargs)
    return {
        key: value.format(**kwargs) if isinstance(value, str) else value
        for key, value in template.items()
    }
|
| 78 |
+
|
| 79 |
+
def extract_obs(tool_name, obs):
    """Wrap a raw tool observation in the standard 'Observation:' prefix.

    Note: *tool_name* is accepted for interface compatibility but unused.
    """
    return "Observation: {}".format(obs)
|
| 82 |
+
|
| 83 |
+
def check_data(data):
    """Validate a collected trajectory.

    A trajectory is kept only when its chain-of-thought is non-empty, has at
    most 8 steps, and its final answer matches the ground truth.

    Args:
        data: dict with a "cot" list (whose last entry holds "answer")
            and a "ground_truth" value.

    Returns:
        bool: True when the trajectory is usable for training.
    """
    cot = data["cot"]
    # Guard against empty trajectories: the original indexed cot[-1]
    # directly, which raised IndexError on an empty list.
    if not cot or len(cot) > 8:
        return False
    return cot[-1]["answer"] == data["ground_truth"]
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
# def format_eval_results(eval_results: dict) -> list:
|
| 93 |
+
# """Format eval results for output."""
|
| 94 |
+
# score = eval_results['score'][0]
|
| 95 |
+
# video_results_list = eval_results['score'][1] # list of dict
|
| 96 |
+
|
| 97 |
+
# # remove the video path
|
| 98 |
+
# for video_result in video_results_list:
|
| 99 |
+
# video_result.pop('video_path')
|
| 100 |
+
|
| 101 |
+
# return score, video_results_list
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
# format the reference table
|
| 105 |
+
def format_dimension_as_string(df, dimension_name):
    """Render one row of a dimension-score table as a one-line reference string.

    Args:
        df: DataFrame with columns 'Dimension', 'Very High', 'High',
            'Moderate', 'Low' and 'Very Low'.
        dimension_name: value looked up in the 'Dimension' column.

    Returns:
        The formatted reference line, or a "No data found" message when the
        dimension is absent from the table.
    """
    matched = df.loc[df['Dimension'] == dimension_name]
    if matched.empty:
        return f"No data found for dimension: {dimension_name}"

    first = matched.iloc[0]
    levels = ("Very High", "High", "Moderate", "Low", "Very Low")
    graded = ", ".join(f"{level} -> {first[level]}" for level in levels)
    return f"{first['Dimension']}: {graded}"
|
| 120 |
+
|
| 121 |
+
def format_eval_results(results, reference_table):
    """Format one tool's evaluation results into the SFT observation text.

    Args:
        results: dict with "Tool" and "eval_results"["score"] ==
            [overall_score, per_video_result_list]; each per-video entry
            carries "prompt" and a numeric "video_results" score.
        reference_table: pre-formatted score-to-level reference string.

    Returns:
        A multi-line string: reference table, overall score, then one
        bullet per prompt.
    """
    tool = results["Tool"]
    overall = results["eval_results"]["score"][0]
    per_video = results["eval_results"]["score"][1]

    lines = [
        f"Scoring Reference Table of '{tool}': {reference_table}\n",
        "Results:",
        f"- Overall score: {overall:.4f}",
        "- Per-prompt scores:",
    ]
    for entry in per_video:
        lines.append(f"  • \"{entry['prompt']}\": {entry['video_results']:.4f}")
    return "\n".join(lines) + "\n"
|
| 139 |
+
|
| 140 |
+
# Main function to convert the data to the Alpaca format
def convert_to_alpaca(json_path, output_dir, return_data=False):
    """Convert one chat-history JSON file to Alpaca-format training examples.

    The input file is a JSON list whose first element is the initial
    instruction and whose later elements are per-step records with
    'Thought', 'Tool', 'Sub-aspect', 'eval_results' and (for the final
    step) 'Analysis'/'Summary'.  One Alpaca example is emitted per step,
    carrying the (observation, output) pairs of all previous steps in its
    "history" field.

    Args:
        json_path: path to the chat-history JSON file.
        output_dir: directory where processed_data.json is written (used
            only when return_data is False).
        return_data: when True, return the example list instead of writing.
    """
    global global_counter
    data_list = []
    # Process each file
    with open(json_path, "r", encoding="utf-8") as in_f:
        data = json.load(in_f)

    # remove the last element
    data.pop()

    # data["ID"] = global_counter
    ops = []   # model outputs, one per successfully processed step
    obs = []   # formatted observations, one per successfully processed step

    # Generate the history
    for i in range(1, len(data)):
        # Prepare the output
        try:
            if i == len(data) - 1:  # last step
                op = f"{thinking_template.format(thinking=data[i]['Thought'])}{summary_template.format(summary=format_summary(data[i]['Analysis'], data[i]['Summary']))}"
            else:
                op = f"{thinking_template.format(thinking=data[i]['Thought'] + ' ' + format_subaspect(data[i]['Sub-aspect']))}{tool_template.format(tool=data[i]['Tool'])}"

            # only n-1 steps have observation
            # obs.append(observation_template.format(information=extract_obs(data["cot"][i]["tool"]["name"], data["cot"][i]["observation"])))
            # score, video_results_list = format_eval_results(data[i]['eval_results'])
            # obs.append(observation_template.format(info0=score, info1=video_results_list)) # Current observation is the eval_results
            reference_table = format_dimension_as_string(vbench_dimention_df, data[i]['Tool'])
            obs.append(observation_template.format(information=format_eval_results(data[i], reference_table)))

        except Exception as e:
            # NOTE(review): a failure here skips ops.append below, so ops and
            # obs can fall out of sync for the remaining steps of this file.
            print(f"Error in processing data {json_path} at step {i}: {e}")
            continue

        ops.append(op)

        # Build history for this step
        history = []
        for j in range(1, i):  # Start from 1 since we process from step 1
            if j == 1:
                traj = [
                    data[0],     # initial instruction opens the history
                    ops[j-1]     # ops is 0-indexed but we start processing from step 1
                ]
            else:
                traj = [
                    obs[j-2],    # obs is built as we go
                    ops[j-1]
                ]

            history.append(traj)

        # Convert the data to the Alpaca format at the n-th step
        if i == 1:  # First step after initial instruction
            data_n = format_template(alpaca_template, **{
                "instruction": data[0],
                "input": "",
                "output": op,
                "system": sys_prompts["eval-agent-vbench-training-sys_v1"] + sys_prompts["eval-agent-format-sys"]
            })
        else:
            data_n = format_template(alpaca_template, **{
                "instruction": obs[i-2],  # Previous observation
                "input": "",
                "output": op,
                "system": sys_prompts["eval-agent-vbench-training-sys_v1"] + sys_prompts["eval-agent-format-sys"]
            })

        data_n["history"] = history

        # # filter the tokens > 8096
        # tokenizer = transformers.AutoTokenizer.from_pretrained('Ego-R1/qwen-sft-epoch3-len16192-20250511-3b-inst')
        # tokens = tokenizer(f"{data_n['instruction']} {data_n['output']} {data_n['system']} {str(data_n['history'])}")
        # if len(tokens['input_ids']) > 8096:
        #     print(f"Skipping data with tokens > 8096: {data['ID']}")
        #     continue

        data_list.append(data_n)
        global_counter += 1

    if return_data:
        return data_list

    print(f"Size of the sft dataset: {len(data_list)}")

    # Create output directory if it doesn't exist
    os.makedirs(output_dir, exist_ok=True)

    # Save the processed data
    with open(os.path.join(output_dir, "processed_data.json"), "w", encoding="utf-8") as out_f:
        json.dump(data_list, out_f, ensure_ascii=False, indent=4)
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def arg_parse():
    """Parse command-line arguments for the post-processing script."""
    parser = argparse.ArgumentParser(description="Process EgoLife data for training")
    parser.add_argument("--home_dir", type=str,
                        default="/home/data2/sltian/code/evaluation_agent_dev")
    # For the test set, point --data_dir at the benchmark_shuffle_new
    # directory instead of the default training data directory.
    parser.add_argument("--data_dir", type=str, default="ea-data")
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--format", type=str, choices=["alpaca", "glaive"],
                        default="alpaca")
    return parser.parse_args()
|
| 253 |
+
|
| 254 |
+
def main():
    """Entry point: convert every preprocessed chat-history file to Alpaca
    format and write the combined dataset to the output directory."""
    args = arg_parse()

    if args.output_dir is None:
        import datetime
        stamp = datetime.datetime.now().strftime('%Y%m%d')
        args.output_dir = os.path.join("data", f"postprocess_{stamp}")

    # Collect every JSON file from the (hard-coded) preprocess directory.
    preprocess_dir = "/home/data2/sltian/code/evaluation_agent_dev/data/preprocess"
    json_files = glob.glob(os.path.join(preprocess_dir, "*.json"))

    # Keep only chat-history JSON files (drop mapping / summary artifacts).
    chat_files = [
        path for path in json_files
        if path.endswith(".json")
        and not path.endswith("mapping.txt")
        and not path.endswith("summary_by_model.txt")
    ]

    print(f"Found {len(chat_files)} chat history files to process")

    # Build one combined dataset from all files; a failing file is logged
    # and skipped rather than aborting the run.
    all_data = []
    for idx, chat_path in enumerate(chat_files, start=1):
        print(f"Processing file {idx}/{len(chat_files)}: {os.path.basename(chat_path)}")
        try:
            file_data = convert_to_alpaca(chat_path, args.output_dir, return_data=True)
            all_data.extend(file_data)
        except Exception as e:
            print(f"Error processing {chat_path}: {e}")
            continue

    print(f"\nTotal training examples created: {len(all_data)}")

    # Save the combined dataset
    os.makedirs(args.output_dir, exist_ok=True)
    output_path = os.path.join(args.output_dir, "evaluation_agent_cot_dataset.json")
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(all_data, f, ensure_ascii=False, indent=2)

    print(f"Combined dataset saved to: {output_path}")


if __name__ == "__main__":
    main()
|
data_pre/postprocess_t2i.py
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import json
|
| 4 |
+
import glob
|
| 5 |
+
import argparse
|
| 6 |
+
from datasets import load_dataset, concatenate_datasets
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import shutil
|
| 9 |
+
import chardet
|
| 10 |
+
import ast
|
| 11 |
+
import transformers
|
| 12 |
+
|
| 13 |
+
# Add parent directory to Python path
|
| 14 |
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
| 15 |
+
# Add current directory to Python path
|
| 16 |
+
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
| 17 |
+
from eval_agent.system_prompts import sys_prompts
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# vbench_dimention_df = pd.read_csv("eval_agent/vbench_dimension_scores.tsv", sep="\t")
|
| 21 |
+
t2i_dimention_df = pd.read_csv("eval_agent/t2i_comp_dimension_scores.tsv", sep="\t")
|
| 22 |
+
|
| 23 |
+
# Templates for different components
|
| 24 |
+
alpaca_template = {
|
| 25 |
+
"instruction": "{instruction}",
|
| 26 |
+
"input": "{input}",
|
| 27 |
+
"output": "{output}",
|
| 28 |
+
"system": "{system}"
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
thinking_template = "<think>{thinking}</think>"
|
| 33 |
+
tool_template = "<tool>{tool}</tool>"
|
| 34 |
+
|
| 35 |
+
observation_template = "<information>{information}</information>"
|
| 36 |
+
|
| 37 |
+
analysis_template = "<analysis>{analysis}</analysis>"
|
| 38 |
+
summary_template = "<summary>{summary}</summary>"
|
| 39 |
+
|
| 40 |
+
# Global counter for tracking data
|
| 41 |
+
global_counter = 0
|
| 42 |
+
length_counter = 0
|
| 43 |
+
|
| 44 |
+
def format_subaspect(sub_aspect):
|
| 45 |
+
"""Format sub-aspect for output."""
|
| 46 |
+
import random
|
| 47 |
+
template_list = [
|
| 48 |
+
"I will evaluate the model's sub-aspect: {sub_aspect}.",
|
| 49 |
+
"I will focus on the {sub_aspect} sub-aspect of the model.",
|
| 50 |
+
"Let me assess the {sub_aspect} sub-aspect of this model.",
|
| 51 |
+
"I need to examine the model's {sub_aspect} sub-aspect.",
|
| 52 |
+
"Now I will analyze the {sub_aspect} sub-aspect dimension.",
|
| 53 |
+
"I'll investigate the {sub_aspect} sub-aspect quality of the model.",
|
| 54 |
+
"Time to evaluate the {sub_aspect} sub-aspect performance.",
|
| 55 |
+
"I should check the model's {sub_aspect} sub-aspect capabilities."
|
| 56 |
+
]
|
| 57 |
+
selected_template = random.choice(template_list)
|
| 58 |
+
return selected_template.format(sub_aspect=sub_aspect)
|
| 59 |
+
|
| 60 |
+
def format_summary(analysis, summary):
|
| 61 |
+
"""Format summary for output."""
|
| 62 |
+
return f"Analysis: {analysis}\nSummary: {summary}"
|
| 63 |
+
|
| 64 |
+
def load_data(file_path):
|
| 65 |
+
"""Load JSON data from a file."""
|
| 66 |
+
with open(file_path, "r", encoding="utf-8") as f:
|
| 67 |
+
return json.load(f)
|
| 68 |
+
|
| 69 |
+
def format_template(template, **kwargs):
|
| 70 |
+
"""Format a template with provided values."""
|
| 71 |
+
if isinstance(template, dict):
|
| 72 |
+
result = {}
|
| 73 |
+
for key, value in template.items():
|
| 74 |
+
if isinstance(value, str):
|
| 75 |
+
result[key] = value.format(**kwargs)
|
| 76 |
+
else:
|
| 77 |
+
result[key] = value
|
| 78 |
+
return result
|
| 79 |
+
return template.format(**kwargs)
|
| 80 |
+
|
| 81 |
+
def extract_obs(tool_name, obs):
|
| 82 |
+
"""Extract observation information for a tool."""
|
| 83 |
+
return f"Observation: {obs}"
|
| 84 |
+
|
| 85 |
+
def check_data(data):
    """Validate a collected trajectory.

    A trajectory is kept only when its chain-of-thought is non-empty, has at
    most 8 steps, and its final answer matches the ground truth.

    Args:
        data: dict with a "cot" list (whose last entry holds "answer")
            and a "ground_truth" value.

    Returns:
        bool: True when the trajectory is usable for training.
    """
    cot = data["cot"]
    # Guard against empty trajectories: the original indexed cot[-1]
    # directly, which raised IndexError on an empty list.
    if not cot or len(cot) > 8:
        return False
    return cot[-1]["answer"] == data["ground_truth"]
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
# def format_eval_results(eval_results: dict) -> list:
|
| 95 |
+
# """Format eval results for output."""
|
| 96 |
+
# score = eval_results['score'][0]
|
| 97 |
+
# video_results_list = eval_results['score'][1] # list of dict
|
| 98 |
+
|
| 99 |
+
# # remove the video path
|
| 100 |
+
# for video_result in video_results_list:
|
| 101 |
+
# video_result.pop('video_path')
|
| 102 |
+
|
| 103 |
+
# return score, video_results_list
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
# format the reference table
|
| 107 |
+
def format_dimension_as_string(df, dimension_name):
|
| 108 |
+
row = df.loc[df['Dimension'] == dimension_name]
|
| 109 |
+
if row.empty:
|
| 110 |
+
return f"No data found for dimension: {dimension_name}"
|
| 111 |
+
|
| 112 |
+
formatted_string = (
|
| 113 |
+
f"{row['Dimension'].values[0]}: "
|
| 114 |
+
f"Very High -> {row['Very High'].values[0]}, "
|
| 115 |
+
f"High -> {row['High'].values[0]}, "
|
| 116 |
+
f"Moderate -> {row['Moderate'].values[0]}, "
|
| 117 |
+
f"Low -> {row['Low'].values[0]}, "
|
| 118 |
+
f"Very Low -> {row['Very Low'].values[0]}"
|
| 119 |
+
)
|
| 120 |
+
|
| 121 |
+
return formatted_string
|
| 122 |
+
|
| 123 |
+
def format_eval_results(results, reference_table):
    """Format one tool's evaluation results into the SFT observation text.

    Args:
        results: dict with "Tool" and "eval_results"["score"] ==
            [overall_score, per_image_result_list]; each per-image entry
            carries "prompt" and a numeric "image_results" score.
        reference_table: pre-formatted score-to-level reference string.

    Returns:
        A multi-line string: reference table, overall score, then one
        bullet per prompt.
    """
    tool = results["Tool"]
    overall = results["eval_results"]["score"][0]
    per_image = results["eval_results"]["score"][1]

    lines = [
        f"Scoring Reference Table of '{tool}': {reference_table}\n",
        "Results:",
        f"- Overall score: {overall:.4f}",
        "- Per-prompt scores:",
    ]
    for entry in per_image:
        lines.append(f"  • \"{entry['prompt']}\": {entry['image_results']:.4f}")
    return "\n".join(lines) + "\n"
|
| 141 |
+
|
| 142 |
+
# Main function to convert the data to the Alpaca format
|
| 143 |
+
def convert_to_alpaca(json_path, output_dir, return_data=False):
|
| 144 |
+
"""Convert data to Alpaca format for training."""
|
| 145 |
+
global global_counter
|
| 146 |
+
data_list = []
|
| 147 |
+
# Process each file
|
| 148 |
+
with open(json_path, "r", encoding="utf-8") as in_f:
|
| 149 |
+
data = json.load(in_f)
|
| 150 |
+
|
| 151 |
+
# remove the last element
|
| 152 |
+
data.pop()
|
| 153 |
+
|
| 154 |
+
# data["ID"] = global_counter
|
| 155 |
+
ops = []
|
| 156 |
+
obs = []
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
# Generate the history
|
| 161 |
+
for i in range(1, len(data)):
|
| 162 |
+
# Prepare the output
|
| 163 |
+
try:
|
| 164 |
+
if i == len(data) - 1: # last step
|
| 165 |
+
op = f"{thinking_template.format(thinking=data[i]['Thought'])}{summary_template.format(summary=format_summary(data[i]['Analysis'], data[i]['Summary']))}"
|
| 166 |
+
else:
|
| 167 |
+
op = f"{thinking_template.format(thinking=data[i]['Thought'] + ' ' + format_subaspect(data[i]['Sub-aspect']))}{tool_template.format(tool=data[i]['Tool'])}"
|
| 168 |
+
|
| 169 |
+
# only n-1 steps have observation
|
| 170 |
+
# obs.append(observation_template.format(information=extract_obs(data["cot"][i]["tool"]["name"], data["cot"][i]["observation"])))
|
| 171 |
+
# score, video_results_list = format_eval_results(data[i]['eval_results'])
|
| 172 |
+
# obs.append(observation_template.format(info0=score, info1=video_results_list)) # Current observation is the eval_results
|
| 173 |
+
reference_table = format_dimension_as_string(t2i_dimention_df, data[i]['Tool'])
|
| 174 |
+
obs.append(observation_template.format(information=format_eval_results(data[i], reference_table)))
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
except Exception as e:
|
| 178 |
+
print(f"Error in processing data {json_path} at step {i}: {e}")
|
| 179 |
+
continue
|
| 180 |
+
|
| 181 |
+
ops.append(op)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
# Build history for this step
|
| 185 |
+
history = []
|
| 186 |
+
for j in range(1, i): # Start from 1 since we process from step 1
|
| 187 |
+
if j == 1:
|
| 188 |
+
traj = [
|
| 189 |
+
data[0],
|
| 190 |
+
ops[j-1] # ops is 0-indexed but we start processing from step 1
|
| 191 |
+
]
|
| 192 |
+
else:
|
| 193 |
+
traj = [
|
| 194 |
+
obs[j-2], # obs is built as we go
|
| 195 |
+
ops[j-1]
|
| 196 |
+
]
|
| 197 |
+
|
| 198 |
+
history.append(traj)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
# Convert the data to the Alpaca format at the n-th step
|
| 204 |
+
if i == 1: # First step after initial instruction
|
| 205 |
+
data_n = format_template(alpaca_template, **{
|
| 206 |
+
"instruction": data[0],
|
| 207 |
+
"input": "",
|
| 208 |
+
"output": op,
|
| 209 |
+
"system": sys_prompts["eval-agent-t2i-training-sys"] + sys_prompts["eval-agent-format-sys"]
|
| 210 |
+
})
|
| 211 |
+
else:
|
| 212 |
+
data_n = format_template(alpaca_template, **{
|
| 213 |
+
"instruction": obs[i-2], # Previous observation
|
| 214 |
+
"input": "",
|
| 215 |
+
"output": op,
|
| 216 |
+
"system": sys_prompts["eval-agent-t2i-training-sys"] + sys_prompts["eval-agent-format-sys"]
|
| 217 |
+
})
|
| 218 |
+
|
| 219 |
+
data_n["history"] = history
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
# # filter the tokens > 8096
|
| 224 |
+
# tokenizer = transformers.AutoTokenizer.from_pretrained('Ego-R1/qwen-sft-epoch3-len16192-20250511-3b-inst')
|
| 225 |
+
# tokens = tokenizer(f"{data_n['instruction']} {data_n['output']} {data_n['system']} {str(data_n['history'])}")
|
| 226 |
+
# if len(tokens['input_ids']) > 8096:
|
| 227 |
+
# print(f"Skipping data with tokens > 8096: {data['ID']}")
|
| 228 |
+
# continue
|
| 229 |
+
|
| 230 |
+
data_list.append(data_n)
|
| 231 |
+
global_counter += 1
|
| 232 |
+
|
| 233 |
+
if return_data:
|
| 234 |
+
return data_list
|
| 235 |
+
|
| 236 |
+
print(f"Size of the sft dataset: {len(data_list)}")
|
| 237 |
+
|
| 238 |
+
# Create output directory if it doesn't exist
|
| 239 |
+
os.makedirs(output_dir, exist_ok=True)
|
| 240 |
+
|
| 241 |
+
# Save the processed data
|
| 242 |
+
with open(os.path.join(output_dir, "processed_data.json"), "w", encoding="utf-8") as out_f:
|
| 243 |
+
json.dump(data_list, out_f, ensure_ascii=False, indent=4)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def arg_parse(argv=None):
    """Parse command line arguments for the data-processing script.

    Args:
        argv: Optional list of argument strings to parse. Defaults to
            ``None``, in which case argparse falls back to ``sys.argv[1:]``
            (the original behavior), so existing callers are unaffected.
            Passing an explicit list makes the parser unit-testable.

    Returns:
        argparse.Namespace with ``home_dir``, ``data_dir``, ``output_dir``,
        and ``format`` attributes.
    """
    parser = argparse.ArgumentParser(description="Process EgoLife data for training")
    parser.add_argument("--home_dir", type=str, default="/home/data2/sltian/code/evaluation_agent_dev")
    # For the test set, data_dir should be
    # "/home/data2/sltian/code/Ego-R1_dev/egor1-bench/QA-egolife/benchmark/benchmark_shuffle_new"
    parser.add_argument("--data_dir", type=str, default="ea-data")
    # None means "derive a dated default in main()".
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--format", type=str, choices=["alpaca", "glaive"], default="alpaca")
    return parser.parse_args(argv)
|
| 255 |
+
|
| 256 |
+
def main():
    """Process all preprocessed T2I chat-history files into one dataset.

    Globs the preprocessed JSON files, converts each one to the Alpaca
    format via ``convert_to_alpaca``, and writes the combined dataset to
    ``<output_dir>/evaluation_agent_cot_dataset_t2i.json``.
    """
    args = arg_parse()

    # Default output dir is dated so repeated runs don't clobber each other.
    if args.output_dir is None:
        import datetime
        args.output_dir = os.path.join("data", f"postprocess_{datetime.datetime.now().strftime('%Y%m%d')}")

    # Get all JSON files from the preprocess directory.
    # NOTE(review): this path is hardcoded and ignores args.home_dir /
    # args.data_dir — confirm whether it should be derived from the CLI args.
    preprocess_dir = "/home/data2/sltian/code/evaluation_agent_dev/data/preprocess-t2i"
    # glob("*.json") already excludes mapping.txt / summary_by_model.txt,
    # so no extra suffix filtering is needed.
    chat_files = glob.glob(os.path.join(preprocess_dir, "*.json"))

    print(f"Found {len(chat_files)} chat history files to process")

    # Convert each file; a failure on one file must not abort the whole run.
    all_data = []
    for i, json_path in enumerate(chat_files):
        print(f"Processing file {i+1}/{len(chat_files)}: {os.path.basename(json_path)}")
        try:
            file_data = convert_to_alpaca(json_path, args.output_dir, return_data=True)
            all_data.extend(file_data)
        except Exception as e:
            print(f"Error processing {json_path}: {e}")
            continue

    print(f"\nTotal training examples created: {len(all_data)}")

    # Save the combined dataset.
    os.makedirs(args.output_dir, exist_ok=True)
    output_path = os.path.join(args.output_dir, "evaluation_agent_cot_dataset_t2i.json")
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(all_data, f, ensure_ascii=False, indent=2)

    print(f"Combined dataset saved to: {output_path}")


if __name__ == "__main__":
    main()
|
data_pre/view_data.ipynb
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": null,
|
| 6 |
+
"metadata": {
|
| 7 |
+
"vscode": {
|
| 8 |
+
"languageId": "plaintext"
|
| 9 |
+
}
|
| 10 |
+
},
|
| 11 |
+
"outputs": [],
|
| 12 |
+
"source": [
|
| 13 |
+
"\n",
|
| 14 |
+
" import json\n",
|
| 15 |
+
"\n",
|
| 16 |
+
" # Load the JSON file\n",
|
| 17 |
+
" file_path = 'your_file.json'\n",
|
| 18 |
+
" with open(file_path, 'r') as f:\n",
|
| 19 |
+
" data = json.load(f)\n",
|
| 20 |
+
"\n",
|
| 21 |
+
" # The structure contains:\n",
|
| 22 |
+
" # - data[0]: The evaluation question\n",
|
| 23 |
+
" # - data[1-3]: Three evaluation rounds with different complexity levels\n",
|
| 24 |
+
" # - data[4]: Final analysis and summary\n",
|
| 25 |
+
" # - data[5]: Full chat history\n",
|
| 26 |
+
"\n",
|
| 27 |
+
" # Extract evaluation results\n",
|
| 28 |
+
" question = data[0]\n",
|
| 29 |
+
" evaluations = []\n",
|
| 30 |
+
"\n",
|
| 31 |
+
" for item in data[1:4]: # The three evaluation rounds\n",
|
| 32 |
+
" eval_info = {\n",
|
| 33 |
+
" 'sub_aspect': item['Sub-aspect'],\n",
|
| 34 |
+
" 'tool': item['Tool'],\n",
|
| 35 |
+
" 'thought': item['Thought'],\n",
|
| 36 |
+
" 'average_score': item['eval_results']['score'][0],\n",
|
| 37 |
+
" 'detailed_results': item['eval_results']['score'][1]\n",
|
| 38 |
+
" }\n",
|
| 39 |
+
" evaluations.append(eval_info)\n",
|
| 40 |
+
"\n",
|
| 41 |
+
" # Extract individual video scores\n",
|
| 42 |
+
" for eval_round in evaluations:\n",
|
| 43 |
+
" print(f\"\\n{eval_round['sub_aspect']}:\")\n",
|
| 44 |
+
" for video in eval_round['detailed_results']:\n",
|
| 45 |
+
" print(f\" {video['prompt']}: {video['video_results']:.4f}\")\n",
|
| 46 |
+
"\n",
|
| 47 |
+
" # Get final analysis (if present)\n",
|
| 48 |
+
" if isinstance(data[4], dict) and 'Analysis' in data[4]:\n",
|
| 49 |
+
" final_analysis = data[4]['Analysis']\n",
|
| 50 |
+
" summary = data[4]['Summary']"
|
| 51 |
+
]
|
| 52 |
+
}
|
| 53 |
+
],
|
| 54 |
+
"metadata": {
|
| 55 |
+
"language_info": {
|
| 56 |
+
"name": "python"
|
| 57 |
+
}
|
| 58 |
+
},
|
| 59 |
+
"nbformat": 4,
|
| 60 |
+
"nbformat_minor": 2
|
| 61 |
+
}
|
dataset/README.md
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Open-Ended User Query Dataset
|
| 2 |
+
|
| 3 |
+
We compiled the final open-ended user query dataset into the `dataset/open_ended_user_questions.json` file.
|
| 4 |
+
|
dataset/open_ended_user_questions.json
ADDED
|
@@ -0,0 +1,604 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"questions": [
|
| 3 |
+
{
|
| 4 |
+
"question": "How well can the model visualize my ideas based on my words?",
|
| 5 |
+
"ability": "Prompt Following",
|
| 6 |
+
"general_or_specific": "General",
|
| 7 |
+
"category": ""
|
| 8 |
+
},
|
| 9 |
+
{
|
| 10 |
+
"question": "How aligned is the generated content with the provided description?",
|
| 11 |
+
"ability": "Prompt Following",
|
| 12 |
+
"general_or_specific": "General",
|
| 13 |
+
"category": ""
|
| 14 |
+
},
|
| 15 |
+
{
|
| 16 |
+
"question": "Does the model understand specific terms, objects, and intentions?",
|
| 17 |
+
"ability": "Prompt Following",
|
| 18 |
+
"general_or_specific": "General",
|
| 19 |
+
"category": ""
|
| 20 |
+
},
|
| 21 |
+
{
|
| 22 |
+
"question": "How well does the model respond to ambiguous prompts? Does it handle missing or vague information effectively?",
|
| 23 |
+
"ability": "Prompt Following",
|
| 24 |
+
"general_or_specific": "General",
|
| 25 |
+
"category": ""
|
| 26 |
+
},
|
| 27 |
+
{
|
| 28 |
+
"question": "How closely does the generated output correspond to the prompts provided?",
|
| 29 |
+
"ability": "Prompt Following",
|
| 30 |
+
"general_or_specific": "General",
|
| 31 |
+
"category": ""
|
| 32 |
+
},
|
| 33 |
+
{
|
| 34 |
+
"question": "Does the generated result match the description?",
|
| 35 |
+
"ability": "Prompt Following",
|
| 36 |
+
"general_or_specific": "General",
|
| 37 |
+
"category": ""
|
| 38 |
+
},
|
| 39 |
+
{
|
| 40 |
+
"question": "Can the model maintain style consistency across multiple generations from the same prompt?",
|
| 41 |
+
"ability": "Prompt Following",
|
| 42 |
+
"general_or_specific": "General",
|
| 43 |
+
"category": ""
|
| 44 |
+
},
|
| 45 |
+
{
|
| 46 |
+
"question": "How well does the model distinguish between subtle variations in style (e.g., modern vs. postmodern art)?",
|
| 47 |
+
"ability": "Prompt Following",
|
| 48 |
+
"general_or_specific": "General",
|
| 49 |
+
"category": ""
|
| 50 |
+
},
|
| 51 |
+
{
|
| 52 |
+
"question": "What style does the model tend to generate most often?",
|
| 53 |
+
"ability": "Prompt Following",
|
| 54 |
+
"general_or_specific": "General",
|
| 55 |
+
"category": ""
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"question": "Does the model adapt to different stylistic instructions with accuracy and consistency?",
|
| 59 |
+
"ability": "Prompt Following",
|
| 60 |
+
"general_or_specific": "General",
|
| 61 |
+
"category": ""
|
| 62 |
+
},
|
| 63 |
+
{
|
| 64 |
+
"question": "How precisely can the user specify object relationships?",
|
| 65 |
+
"ability": "Prompt Following",
|
| 66 |
+
"general_or_specific": "General",
|
| 67 |
+
"category": ""
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"question": "How well can the model maintain object proportions and spatial relationships in complex scenes?",
|
| 71 |
+
"ability": "Prompt Following",
|
| 72 |
+
"general_or_specific": "General",
|
| 73 |
+
"category": ""
|
| 74 |
+
},
|
| 75 |
+
{
|
| 76 |
+
"question": "How effectively does the model handle changes in perspective?",
|
| 77 |
+
"ability": "Prompt Following",
|
| 78 |
+
"general_or_specific": "General",
|
| 79 |
+
"category": ""
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"question": "Does the model demonstrate flexibility in generating different compositions from similar prompts?",
|
| 83 |
+
"ability": "Prompt Following",
|
| 84 |
+
"general_or_specific": "General",
|
| 85 |
+
"category": ""
|
| 86 |
+
},
|
| 87 |
+
{
|
| 88 |
+
"question": "How precisely can I control the generative model?",
|
| 89 |
+
"ability": "Prompt Following",
|
| 90 |
+
"general_or_specific": "General",
|
| 91 |
+
"category": ""
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"question": "Does the model tend to simplify complex objects or details, or does it retain high levels of intricacy?",
|
| 95 |
+
"ability": "Prompt Following",
|
| 96 |
+
"general_or_specific": "General",
|
| 97 |
+
"category": ""
|
| 98 |
+
},
|
| 99 |
+
{
|
| 100 |
+
"question": "Can the model adjust its output when given slight variations of the same prompt (e.g., changes in tone or context)?",
|
| 101 |
+
"ability": "Prompt Following",
|
| 102 |
+
"general_or_specific": "General",
|
| 103 |
+
"category": ""
|
| 104 |
+
},
|
| 105 |
+
{
|
| 106 |
+
"question": "How well does the model handle varying levels of detail in its outputs?",
|
| 107 |
+
"ability": "Prompt Following",
|
| 108 |
+
"general_or_specific": "General",
|
| 109 |
+
"category": ""
|
| 110 |
+
},
|
| 111 |
+
{
|
| 112 |
+
"question": "Does the model generate similar outputs when there are only small changes in the prompts?",
|
| 113 |
+
"ability": "Prompt Following",
|
| 114 |
+
"general_or_specific": "General",
|
| 115 |
+
"category": ""
|
| 116 |
+
},
|
| 117 |
+
{
|
| 118 |
+
"question": "How well does the model balance contrasting elements, such as light and dark or complex and simple?",
|
| 119 |
+
"ability": "Visual Quality",
|
| 120 |
+
"general_or_specific": "General",
|
| 121 |
+
"category": ""
|
| 122 |
+
},
|
| 123 |
+
{
|
| 124 |
+
"question": "To what extent can the model produce photorealistic and aesthetically pleasing results?",
|
| 125 |
+
"ability": "Visual Quality",
|
| 126 |
+
"general_or_specific": "General",
|
| 127 |
+
"category": ""
|
| 128 |
+
},
|
| 129 |
+
{
|
| 130 |
+
"question": "Can the model generate realistic images without common AI-generated artifacts?",
|
| 131 |
+
"ability": "Visual Quality",
|
| 132 |
+
"general_or_specific": "General",
|
| 133 |
+
"category": ""
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"question": "Can it generate physically plausible results?",
|
| 137 |
+
"ability": "Visual Quality",
|
| 138 |
+
"general_or_specific": "General",
|
| 139 |
+
"category": ""
|
| 140 |
+
},
|
| 141 |
+
{
|
| 142 |
+
"question": "How realistic is the generated content?",
|
| 143 |
+
"ability": "Visual Quality",
|
| 144 |
+
"general_or_specific": "General",
|
| 145 |
+
"category": ""
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"question": "Can it generate results that are as realistic as possible?",
|
| 149 |
+
"ability": "Visual Quality",
|
| 150 |
+
"general_or_specific": "General",
|
| 151 |
+
"category": ""
|
| 152 |
+
},
|
| 153 |
+
{
|
| 154 |
+
"question": "How real are the generated results?",
|
| 155 |
+
"ability": "Visual Quality",
|
| 156 |
+
"general_or_specific": "General",
|
| 157 |
+
"category": ""
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"question": "Can the model generate high-fidelity content?",
|
| 161 |
+
"ability": "Visual Quality",
|
| 162 |
+
"general_or_specific": "General",
|
| 163 |
+
"category": ""
|
| 164 |
+
},
|
| 165 |
+
{
|
| 166 |
+
"question": "Can the model generate real-life scenes?",
|
| 167 |
+
"ability": "Visual Quality",
|
| 168 |
+
"general_or_specific": "General",
|
| 169 |
+
"category": ""
|
| 170 |
+
},
|
| 171 |
+
{
|
| 172 |
+
"question": "Can the model maintain high image quality when generating large or detailed scenes?",
|
| 173 |
+
"ability": "Visual Quality",
|
| 174 |
+
"general_or_specific": "General",
|
| 175 |
+
"category": ""
|
| 176 |
+
},
|
| 177 |
+
{
|
| 178 |
+
"question": "Does the model tend to generate more photorealistic images or stylized, cartoonish ones?",
|
| 179 |
+
"ability": "Visual Quality",
|
| 180 |
+
"general_or_specific": "General",
|
| 181 |
+
"category": ""
|
| 182 |
+
},
|
| 183 |
+
{
|
| 184 |
+
"question": "Can the model mimic camera effects such as zooming, panning, or focus pulls, especially when not explicitly described in the prompt?",
|
| 185 |
+
"ability": "Visual Quality",
|
| 186 |
+
"general_or_specific": "General",
|
| 187 |
+
"category": ""
|
| 188 |
+
},
|
| 189 |
+
{
|
| 190 |
+
"question": "How well can the model balance aesthetic elements like space, flow, and symmetry in a room setting?",
|
| 191 |
+
"ability": "Visual Quality",
|
| 192 |
+
"general_or_specific": "General",
|
| 193 |
+
"category": ""
|
| 194 |
+
},
|
| 195 |
+
{
|
| 196 |
+
"question": "Can the model be effectively applied in different scenarios, such as varying lighting conditions and environments?",
|
| 197 |
+
"ability": "Visual Quality",
|
| 198 |
+
"general_or_specific": "General",
|
| 199 |
+
"category": ""
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"question": "How well can the model handle lighting and movement?",
|
| 203 |
+
"ability": "Visual Quality",
|
| 204 |
+
"general_or_specific": "General",
|
| 205 |
+
"category": ""
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"question": "How effectively can the model simulate real-world lighting effects for interior and exterior spaces?",
|
| 209 |
+
"ability": "Visual Quality",
|
| 210 |
+
"general_or_specific": "General",
|
| 211 |
+
"category": ""
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"question": "Can the model generate high-fidelity visualizations for natural phenomena like fluid dynamics or planetary orbits?",
|
| 215 |
+
"ability": "Visual Quality",
|
| 216 |
+
"general_or_specific": "General",
|
| 217 |
+
"category": ""
|
| 218 |
+
},
|
| 219 |
+
{
|
| 220 |
+
"question": "How well does the model handle dynamic elements like action or motion?",
|
| 221 |
+
"ability": "Visual Quality",
|
| 222 |
+
"general_or_specific": "General",
|
| 223 |
+
"category": ""
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"question": "How well can the model generate realistic scenes for a sci-fi film?",
|
| 227 |
+
"ability": "Visual Quality",
|
| 228 |
+
"general_or_specific": "Specific",
|
| 229 |
+
"category": "Film and Entertainment"
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"question": "How effectively can the model manage physical realism, such as lighting, shadows, and accurate human anatomy?",
|
| 233 |
+
"ability": "Visual Quality",
|
| 234 |
+
"general_or_specific": "Specific",
|
| 235 |
+
"category": "Medical"
|
| 236 |
+
},
|
| 237 |
+
{
|
| 238 |
+
"question": "Can the model generate detailed game environments such as forests, cities, or futuristic settings?",
|
| 239 |
+
"ability": "Visual Quality",
|
| 240 |
+
"general_or_specific": "Specific",
|
| 241 |
+
"category": "Game Design"
|
| 242 |
+
},
|
| 243 |
+
{
|
| 244 |
+
"question": "How well can the model interpret abstract or metaphorical concepts? Does it translate non-literal ideas into visual form effectively?",
|
| 245 |
+
"ability": "Creativity",
|
| 246 |
+
"general_or_specific": "General",
|
| 247 |
+
"category": ""
|
| 248 |
+
},
|
| 249 |
+
{
|
| 250 |
+
"question": "How well can the model present concepts to explore creative possibilities?",
|
| 251 |
+
"ability": "Creativity",
|
| 252 |
+
"general_or_specific": "General",
|
| 253 |
+
"category": ""
|
| 254 |
+
},
|
| 255 |
+
{
|
| 256 |
+
"question": "Can the model blend different genres or themes successfully?",
|
| 257 |
+
"ability": "Creativity",
|
| 258 |
+
"general_or_specific": "General",
|
| 259 |
+
"category": ""
|
| 260 |
+
},
|
| 261 |
+
{
|
| 262 |
+
"question": "How well does the model balance realism with creativity? Can it generate imaginative scenes while maintaining visual plausibility?",
|
| 263 |
+
"ability": "Creativity",
|
| 264 |
+
"general_or_specific": "General",
|
| 265 |
+
"category": ""
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"question": "How well does the model capture the mood or atmosphere of a given prompt?",
|
| 269 |
+
"ability": "Creativity",
|
| 270 |
+
"general_or_specific": "General",
|
| 271 |
+
"category": ""
|
| 272 |
+
},
|
| 273 |
+
{
|
| 274 |
+
"question": "How effectively can the model represent non-visual concepts, such as emotions or abstract ideas?",
|
| 275 |
+
"ability": "Creativity",
|
| 276 |
+
"general_or_specific": "General",
|
| 277 |
+
"category": ""
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"question": "How does the model handle the depiction of abstract concepts or metaphors in video form? For example, can it animate a 'storm of emotions' or 'time flowing like water'?",
|
| 281 |
+
"ability": "Creativity",
|
| 282 |
+
"general_or_specific": "General",
|
| 283 |
+
"category": ""
|
| 284 |
+
},
|
| 285 |
+
{
|
| 286 |
+
"question": "Can the model handle contradictory visual cues in a prompt? For example, can it create a 'sunset during a snowstorm' or a 'frozen ocean with waves'?",
|
| 287 |
+
"ability": "Creativity",
|
| 288 |
+
"general_or_specific": "General",
|
| 289 |
+
"category": ""
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"question": "How well can the model integrate symbolic or surreal elements, like a tree growing out of someone's hand, while keeping the scene coherent and natural?",
|
| 293 |
+
"ability": "Creativity",
|
| 294 |
+
"general_or_specific": "General",
|
| 295 |
+
"category": ""
|
| 296 |
+
},
|
| 297 |
+
{
|
| 298 |
+
"question": "Can the model render entirely fictional creatures or physics-defying actions like characters walking on walls or gravity reversing?",
|
| 299 |
+
"ability": "Creativity",
|
| 300 |
+
"general_or_specific": "General",
|
| 301 |
+
"category": ""
|
| 302 |
+
},
|
| 303 |
+
{
|
| 304 |
+
"question": "Can the model generate content that evolve dynamically, where the scene or characters change in response to the user's previous inputs?",
|
| 305 |
+
"ability": "Creativity",
|
| 306 |
+
"general_or_specific": "General",
|
| 307 |
+
"category": ""
|
| 308 |
+
},
|
| 309 |
+
{
|
| 310 |
+
"question": "How well can the model interpret narrative-driven prompts where characters are engaging in emotionally complex interactions without explicit action descriptors?",
|
| 311 |
+
"ability": "Creativity",
|
| 312 |
+
"general_or_specific": "General",
|
| 313 |
+
"category": ""
|
| 314 |
+
},
|
| 315 |
+
{
|
| 316 |
+
"question": "How does the model handle unusual or impossible lighting conditions, like 'darkness lit only by the glow of thoughts' or 'a world where shadows are brighter than light'?",
|
| 317 |
+
"ability": "Creativity",
|
| 318 |
+
"general_or_specific": "General",
|
| 319 |
+
"category": ""
|
| 320 |
+
},
|
| 321 |
+
{
|
| 322 |
+
"question": "Can the model generate variations of existing artwork while maintaining the original style?",
|
| 323 |
+
"ability": "Creativity",
|
| 324 |
+
"general_or_specific": "General",
|
| 325 |
+
"category": ""
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"question": "Can the model create videos with non-linear narratives, such as time jumps or reverse chronology? Can it handle multiple timelines that converge or diverge?",
|
| 329 |
+
"ability": "Creativity",
|
| 330 |
+
"general_or_specific": "Specific",
|
| 331 |
+
"category": "Film and Entertainment"
|
| 332 |
+
},
|
| 333 |
+
{
|
| 334 |
+
"question": "Is the generated content sufficiently diverse, including rare or niche fields?",
|
| 335 |
+
"ability": "Others",
|
| 336 |
+
"general_or_specific": "General",
|
| 337 |
+
"category": ""
|
| 338 |
+
},
|
| 339 |
+
{
|
| 340 |
+
"question": "How well the model can generate a specific number of objects?",
|
| 341 |
+
"ability": "Others",
|
| 342 |
+
"general_or_specific": "General",
|
| 343 |
+
"category": ""
|
| 344 |
+
},
|
| 345 |
+
{
|
| 346 |
+
"question": "Does the model tend to overuse specific colors or textures, or does it offer diverse visual outputs?",
|
| 347 |
+
"ability": "Others",
|
| 348 |
+
"general_or_specific": "General",
|
| 349 |
+
"category": ""
|
| 350 |
+
},
|
| 351 |
+
{
|
| 352 |
+
"question": "Does the model generate diverse results for the same prompt, or does it repeat patterns?",
|
| 353 |
+
"ability": "Others",
|
| 354 |
+
"general_or_specific": "General",
|
| 355 |
+
"category": ""
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"question": "Can the model generate flowcharts effectively?",
|
| 359 |
+
"ability": "Creativity",
|
| 360 |
+
"general_or_specific": "Specific",
|
| 361 |
+
"category": "Science and Education"
|
| 362 |
+
},
|
| 363 |
+
{
|
| 364 |
+
"question": "How well can the model generate presentation slides?",
|
| 365 |
+
"ability": "Creativity",
|
| 366 |
+
"general_or_specific": "Specific",
|
| 367 |
+
"category": "Science and Education"
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"question": "Can the model generate clear, courtroom-friendly diagrams to illustrate complex legal cases?",
|
| 371 |
+
"ability": "Creativity",
|
| 372 |
+
"general_or_specific": "Specific",
|
| 373 |
+
"category": "Law"
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"question": "How well can the model generate accurate, scalable floor plans or blueprints?",
|
| 377 |
+
"ability": "Creativity",
|
| 378 |
+
"general_or_specific": "Specific",
|
| 379 |
+
"category": "Architecture and Interior Design"
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"question": "Can the model generate structurally sound designs based on vague architectural concepts?",
|
| 383 |
+
"ability": "Creativity",
|
| 384 |
+
"general_or_specific": "Specific",
|
| 385 |
+
"category": "Architecture and Interior Design"
|
| 386 |
+
},
|
| 387 |
+
{
|
| 388 |
+
"question": "How effectively can the model visualize fictional worlds or characters with great detail?",
|
| 389 |
+
"ability": "Creativity",
|
| 390 |
+
"general_or_specific": "Specific",
|
| 391 |
+
"category": "Film and Entertainment"
|
| 392 |
+
},
|
| 393 |
+
{
|
| 394 |
+
"question": "Can the model generate images that visually suggest different stages of a continuous event or process (e.g., sunrise to sunset progression)?",
|
| 395 |
+
"ability": "Others",
|
| 396 |
+
"general_or_specific": "General",
|
| 397 |
+
"category": ""
|
| 398 |
+
},
|
| 399 |
+
{
|
| 400 |
+
"question": "How well does the model interpret abstract emotional concepts into visual formats?",
|
| 401 |
+
"ability": "Others",
|
| 402 |
+
"general_or_specific": "Specific",
|
| 403 |
+
"category": "Medical"
|
| 404 |
+
},
|
| 405 |
+
{
|
| 406 |
+
"question": "How accurately can the model depict human expressions or body language for psychological studies?",
|
| 407 |
+
"ability": "Others",
|
| 408 |
+
"general_or_specific": "Specific",
|
| 409 |
+
"category": "Medical"
|
| 410 |
+
},
|
| 411 |
+
{
|
| 412 |
+
"question": "Can the model generate different animal anatomical structures?",
|
| 413 |
+
"ability": "Knowledge",
|
| 414 |
+
"general_or_specific": "Specific",
|
| 415 |
+
"category": "Medical"
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"question": "Can the model generate detailed steps of surgical procedures that cannot be shown in typical surgery videos?",
|
| 419 |
+
"ability": "Knowledge",
|
| 420 |
+
"general_or_specific": "Specific",
|
| 421 |
+
"category": "Medical"
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"question": "Can the model understand and apply photographic concepts like focal length, aperture, or ISO?",
|
| 425 |
+
"ability": "Knowledge",
|
| 426 |
+
"general_or_specific": "Specific",
|
| 427 |
+
"category": "Film and Entertainment"
|
| 428 |
+
},
|
| 429 |
+
{
|
| 430 |
+
"question": "How accurately can the model generate medical images like X-rays or MRI scans from brief descriptions?",
|
| 431 |
+
"ability": "Knowledge",
|
| 432 |
+
"general_or_specific": "Specific",
|
| 433 |
+
"category": "Medical"
|
| 434 |
+
},
|
| 435 |
+
{
|
| 436 |
+
"question": "How does the model handle prompts with explicit restrictions, like generating an image ‘with no use of circular shapes’ or ‘without blue hues’?",
|
| 437 |
+
"ability": "Others",
|
| 438 |
+
"general_or_specific": "General",
|
| 439 |
+
"category": ""
|
| 440 |
+
},
|
| 441 |
+
{
|
| 442 |
+
"question": "How well can the model generate 3D visualizations of internal organs or surgical procedures?",
|
| 443 |
+
"ability": "Knowledge",
|
| 444 |
+
"general_or_specific": "Specific",
|
| 445 |
+
"category": "Medical"
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"question": "Can the model handle complex, culturally specific objects, symbols, or rituals?",
|
| 449 |
+
"ability": "Knowledge",
|
| 450 |
+
"general_or_specific": "Specific",
|
| 451 |
+
"category": "History and Culture"
|
| 452 |
+
},
|
| 453 |
+
{
|
| 454 |
+
"question": "Can the model generate precise visual representations of complex mathematical graphs or formulas?",
|
| 455 |
+
"ability": "Knowledge",
|
| 456 |
+
"general_or_specific": "Specific",
|
| 457 |
+
"category": "Science and Education"
|
| 458 |
+
},
|
| 459 |
+
{
|
| 460 |
+
"question": "How accurately does the model generate scientific diagrams like molecular structures or physics simulations?",
|
| 461 |
+
"ability": "Knowledge",
|
| 462 |
+
"general_or_specific": "Specific",
|
| 463 |
+
"category": "Science and Education"
|
| 464 |
+
},
|
| 465 |
+
{
|
| 466 |
+
"question": "How accurately can the model recreate historical scenes, objects, or clothing based on text inputs?",
|
| 467 |
+
"ability": "Knowledge",
|
| 468 |
+
"general_or_specific": "Specific",
|
| 469 |
+
"category": "History and Culture"
|
| 470 |
+
},
|
| 471 |
+
{
|
| 472 |
+
"question": "Can the model accurately depict diverse cultural or historical elements?",
|
| 473 |
+
"ability": "Knowledge",
|
| 474 |
+
"general_or_specific": "Specific",
|
| 475 |
+
"category": "History and Culture"
|
| 476 |
+
},
|
| 477 |
+
{
|
| 478 |
+
"question": "How well can the model recreate a crime scene based on textual descriptions or evidence?",
|
| 479 |
+
"ability": "Knowledge",
|
| 480 |
+
"general_or_specific": "Specific",
|
| 481 |
+
"category": "Law"
|
| 482 |
+
},
|
| 483 |
+
{
|
| 484 |
+
"question": "How accurately does the model generate facial composites based on a witness’s description?",
|
| 485 |
+
"ability": "Knowledge",
|
| 486 |
+
"general_or_specific": "Specific",
|
| 487 |
+
"category": "Law"
|
| 488 |
+
},
|
| 489 |
+
{
|
| 490 |
+
"question": "Can the model generate anime characters?",
|
| 491 |
+
"ability": "Knowledge",
|
| 492 |
+
"general_or_specific": "Specific",
|
| 493 |
+
"category": "Game Design"
|
| 494 |
+
},
|
| 495 |
+
{
|
| 496 |
+
"question": "How precisely can the model replicate specific art styles, such as pixel art or 3D-rendered graphics?",
|
| 497 |
+
"ability": "Knowledge",
|
| 498 |
+
"general_or_specific": "Specific",
|
| 499 |
+
"category": "Game Design"
|
| 500 |
+
},
|
| 501 |
+
{
|
| 502 |
+
"question": "How well does the model interpret perspective and depth in a scene?",
|
| 503 |
+
"ability": "Knowledge",
|
| 504 |
+
"general_or_specific": "Specific",
|
| 505 |
+
"category": "Game Design"
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"question": "Can the model generate geometric and texture effects?",
|
| 509 |
+
"ability": "Knowledge",
|
| 510 |
+
"general_or_specific": "Specific",
|
| 511 |
+
"category": "Game Design"
|
| 512 |
+
},
|
| 513 |
+
{
|
| 514 |
+
"question": "Can the model generate game characters with intricate details like armor or facial expressions?",
|
| 515 |
+
"ability": "Knowledge",
|
| 516 |
+
"general_or_specific": "Specific",
|
| 517 |
+
"category": "Game Design"
|
| 518 |
+
},
|
| 519 |
+
{
|
| 520 |
+
"question": "Can the model generate objects with highly structured textures, such as accurately rendering five fingers on a hand?",
|
| 521 |
+
"ability": "Knowledge",
|
| 522 |
+
"general_or_specific": "Specific",
|
| 523 |
+
"category": "Medical"
|
| 524 |
+
},
|
| 525 |
+
{
|
| 526 |
+
"question": "Can the model accurately generate fashion sketches with fine detailing of fabrics and textures?",
|
| 527 |
+
"ability": "Knowledge",
|
| 528 |
+
"general_or_specific": "Specific",
|
| 529 |
+
"category": "Fashion"
|
| 530 |
+
},
|
| 531 |
+
{
|
| 532 |
+
"question": "Can the model generate complex storyboards incorporating multiple objects, characters, and settings in one scene?",
|
| 533 |
+
"ability": "Knowledge",
|
| 534 |
+
"general_or_specific": "Specific",
|
| 535 |
+
"category": "Film and Entertainment"
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"question": "How accurately can the model generate detailed room layouts with attention to furniture, textures, and color schemes?",
|
| 539 |
+
"ability": "Knowledge",
|
| 540 |
+
"general_or_specific": "Specific",
|
| 541 |
+
"category": "Architecture and Interior Design"
|
| 542 |
+
},
|
| 543 |
+
{
|
| 544 |
+
"question": "Can the model handle fine detailing of various materials like wood, metal, or fabric in generated interiors?",
|
| 545 |
+
"ability": "Knowledge",
|
| 546 |
+
"general_or_specific": "Specific",
|
| 547 |
+
"category": "Architecture and Interior Design"
|
| 548 |
+
},
|
| 549 |
+
{
|
| 550 |
+
"question": "How well can the model represent subtle textures such as skin, fabric, or foliage?",
|
| 551 |
+
"ability": "Knowledge",
|
| 552 |
+
"general_or_specific": "Specific",
|
| 553 |
+
"category": "Fashion"
|
| 554 |
+
},
|
| 555 |
+
{
|
| 556 |
+
"question": "How quickly can the model generate a 3–5-second video with a specific theme?",
|
| 557 |
+
"ability": "Others",
|
| 558 |
+
"general_or_specific": "General",
|
| 559 |
+
"category": ""
|
| 560 |
+
},
|
| 561 |
+
{
|
| 562 |
+
"question": "Can the model differentiate the relationships of characters in the prompts?",
|
| 563 |
+
"ability": "Others",
|
| 564 |
+
"general_or_specific": "General",
|
| 565 |
+
"category": ""
|
| 566 |
+
},
|
| 567 |
+
{
|
| 568 |
+
"question": "How well does the model handle varying levels of detail in its outputs?",
|
| 569 |
+
"ability": "Others",
|
| 570 |
+
"general_or_specific": "General",
|
| 571 |
+
"category": ""
|
| 572 |
+
},
|
| 573 |
+
{
|
| 574 |
+
"question": "Can the model generate structurally sound designs based on vague architectural concepts?",
|
| 575 |
+
"ability": "Others",
|
| 576 |
+
"general_or_specific": "Specific",
|
| 577 |
+
"category": "Architecture and Interior Design"
|
| 578 |
+
},
|
| 579 |
+
{
|
| 580 |
+
"question": "Can the model blend or fuse different design elements (e.g., mix traditional and modern fashion elements in one outfit)?",
|
| 581 |
+
"ability": "Others",
|
| 582 |
+
"general_or_specific": "Specific",
|
| 583 |
+
"category": "Fashion"
|
| 584 |
+
},
|
| 585 |
+
{
|
| 586 |
+
"question": "Is the model capable of generating designs tailored to individual body types or preferences?",
|
| 587 |
+
"ability": "Others",
|
| 588 |
+
"general_or_specific": "Specific",
|
| 589 |
+
"category": "Fashion"
|
| 590 |
+
},
|
| 591 |
+
{
|
| 592 |
+
"question": "How well does the model handle creating looks for specific occasions, such as casual, formal, or streetwear?",
|
| 593 |
+
"ability": "Others",
|
| 594 |
+
"general_or_specific": "Specific",
|
| 595 |
+
"category": "Fashion"
|
| 596 |
+
},
|
| 597 |
+
{
|
| 598 |
+
"question": "How well can the model represent various fashion trends or styles based on vague descriptions?",
|
| 599 |
+
"ability": "Others",
|
| 600 |
+
"general_or_specific": "Specific",
|
| 601 |
+
"category": "Fashion"
|
| 602 |
+
}
|
| 603 |
+
]
|
| 604 |
+
}
|
dataset/open_ended_user_questions_summary.json
ADDED
|
@@ -0,0 +1,340 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"Ability": {
|
| 3 |
+
"Prompt Following": [
|
| 4 |
+
"How well can the model visualize my ideas based on my words?",
|
| 5 |
+
"How aligned is the generated content with the provided description?",
|
| 6 |
+
"Does the model understand specific terms, objects, and intentions?",
|
| 7 |
+
"How well does the model respond to ambiguous prompts? Does it handle missing or vague information effectively?",
|
| 8 |
+
"How closely does the generated output correspond to the prompts provided?",
|
| 9 |
+
"Does the generated result match the description?",
|
| 10 |
+
"Can the model maintain style consistency across multiple generations from the same prompt?",
|
| 11 |
+
"How well does the model distinguish between subtle variations in style (e.g., modern vs. postmodern art)?",
|
| 12 |
+
"What style does the model tend to generate most often?",
|
| 13 |
+
"Does the model adapt to different stylistic instructions with accuracy and consistency?",
|
| 14 |
+
"How precisely can the user specify object relationships?",
|
| 15 |
+
"How well can the model maintain object proportions and spatial relationships in complex scenes?",
|
| 16 |
+
"How effectively does the model handle changes in perspective?",
|
| 17 |
+
"Does the model demonstrate flexibility in generating different compositions from similar prompts?",
|
| 18 |
+
"How precisely can I control the generative model?",
|
| 19 |
+
"Does the model tend to simplify complex objects or details, or does it retain high levels of intricacy?",
|
| 20 |
+
"Can the model adjust its output when given slight variations of the same prompt (e.g., changes in tone or context)?",
|
| 21 |
+
"How well does the model handle varying levels of detail in its outputs?",
|
| 22 |
+
"Does the model generate similar outputs when there are only small changes in the prompts?"
|
| 23 |
+
],
|
| 24 |
+
"Visual Quality": [
|
| 25 |
+
"How well does the model balance contrasting elements, such as light and dark or complex and simple?",
|
| 26 |
+
"To what extent can the model produce photorealistic and aesthetically pleasing results?",
|
| 27 |
+
"Can the model generate realistic images without common AI-generated artifacts?",
|
| 28 |
+
"Can it generate physically plausible results?",
|
| 29 |
+
"How realistic is the generated content?",
|
| 30 |
+
"Can it generate results that are as realistic as possible?",
|
| 31 |
+
"How real are the generated results?",
|
| 32 |
+
"Can the model generate high-fidelity content?",
|
| 33 |
+
"Can the model generate real-life scenes?",
|
| 34 |
+
"Can the model maintain high image quality when generating large or detailed scenes?",
|
| 35 |
+
"Does the model tend to generate more photorealistic images or stylized, cartoonish ones?",
|
| 36 |
+
"Can the model mimic camera effects such as zooming, panning, or focus pulls, especially when not explicitly described in the prompt?",
|
| 37 |
+
"How well can the model balance aesthetic elements like space, flow, and symmetry in a room setting?",
|
| 38 |
+
"Can the model be effectively applied in different scenarios, such as varying lighting conditions and environments?",
|
| 39 |
+
"How well can the model handle lighting and movement?",
|
| 40 |
+
"How effectively can the model simulate real-world lighting effects for interior and exterior spaces?",
|
| 41 |
+
"Can the model generate high-fidelity visualizations for natural phenomena like fluid dynamics or planetary orbits?",
|
| 42 |
+
"How well does the model handle dynamic elements like action or motion?",
|
| 43 |
+
"How well can the model generate realistic scenes for a sci-fi film?",
|
| 44 |
+
"How effectively can the model manage physical realism, such as lighting, shadows, and accurate human anatomy?",
|
| 45 |
+
"Can the model generate detailed game environments such as forests, cities, or futuristic settings?"
|
| 46 |
+
],
|
| 47 |
+
"Creativity": [
|
| 48 |
+
"How well can the model interpret abstract or metaphorical concepts? Does it translate non-literal ideas into visual form effectively?",
|
| 49 |
+
"How well can the model present concepts to explore creative possibilities?",
|
| 50 |
+
"Can the model blend different genres or themes successfully?",
|
| 51 |
+
"How well does the model balance realism with creativity? Can it generate imaginative scenes while maintaining visual plausibility?",
|
| 52 |
+
"How well does the model capture the mood or atmosphere of a given prompt?",
|
| 53 |
+
"How effectively can the model represent non-visual concepts, such as emotions or abstract ideas?",
|
| 54 |
+
"How does the model handle the depiction of abstract concepts or metaphors in video form? For example, can it animate a 'storm of emotions' or 'time flowing like water'?",
|
| 55 |
+
"Can the model handle contradictory visual cues in a prompt? For example, can it create a 'sunset during a snowstorm' or a 'frozen ocean with waves'?",
|
| 56 |
+
"How well can the model integrate symbolic or surreal elements, like a tree growing out of someone's hand, while keeping the scene coherent and natural?",
|
| 57 |
+
"Can the model render entirely fictional creatures or physics-defying actions like characters walking on walls or gravity reversing?",
|
| 58 |
+
"Can the model generate content that evolve dynamically, where the scene or characters change in response to the user's previous inputs?",
|
| 59 |
+
"How well can the model interpret narrative-driven prompts where characters are engaging in emotionally complex interactions without explicit action descriptors?",
|
| 60 |
+
"How does the model handle unusual or impossible lighting conditions, like 'darkness lit only by the glow of thoughts' or 'a world where shadows are brighter than light'?",
|
| 61 |
+
"Can the model generate variations of existing artwork while maintaining the original style?",
|
| 62 |
+
"Can the model create videos with non-linear narratives, such as time jumps or reverse chronology? Can it handle multiple timelines that converge or diverge?",
|
| 63 |
+
"Can the model generate flowcharts effectively?",
|
| 64 |
+
"How well can the model generate presentation slides?",
|
| 65 |
+
"Can the model generate clear, courtroom-friendly diagrams to illustrate complex legal cases?",
|
| 66 |
+
"How well can the model generate accurate, scalable floor plans or blueprints?",
|
| 67 |
+
"Can the model generate structurally sound designs based on vague architectural concepts?",
|
| 68 |
+
"How effectively can the model visualize fictional worlds or characters with great detail?"
|
| 69 |
+
],
|
| 70 |
+
"Others": [
|
| 71 |
+
"Is the generated content sufficiently diverse, including rare or niche fields?",
|
| 72 |
+
"How well the model can generate a specific number of objects?",
|
| 73 |
+
"Does the model tend to overuse specific colors or textures, or does it offer diverse visual outputs?",
|
| 74 |
+
"Does the model generate diverse results for the same prompt, or does it repeat patterns?",
|
| 75 |
+
"Can the model generate images that visually suggest different stages of a continuous event or process (e.g., sunrise to sunset progression)?",
|
| 76 |
+
"How well does the model interpret abstract emotional concepts into visual formats?",
|
| 77 |
+
"How accurately can the model depict human expressions or body language for psychological studies?",
|
| 78 |
+
"How does the model handle prompts with explicit restrictions, like generating an image ‘with no use of circular shapes’ or ‘without blue hues’?",
|
| 79 |
+
"How quickly can the model generate a 3–5-second video with a specific theme?",
|
| 80 |
+
"Can the model differentiate the relationships of characters in the prompts?",
|
| 81 |
+
"How well does the model handle varying levels of detail in its outputs?",
|
| 82 |
+
"Can the model generate structurally sound designs based on vague architectural concepts?",
|
| 83 |
+
"Can the model blend or fuse different design elements (e.g., mix traditional and modern fashion elements in one outfit)?",
|
| 84 |
+
"Is the model capable of generating designs tailored to individual body types or preferences?",
|
| 85 |
+
"How well does the model handle creating looks for specific occasions, such as casual, formal, or streetwear?",
|
| 86 |
+
"How well can the model represent various fashion trends or styles based on vague descriptions?"
|
| 87 |
+
],
|
| 88 |
+
"Knowledge": [
|
| 89 |
+
"Can the model generate different animal anatomical structures?",
|
| 90 |
+
"Can the model generate detailed steps of surgical procedures that cannot be shown in typical surgery videos?",
|
| 91 |
+
"Can the model understand and apply photographic concepts like focal length, aperture, or ISO?",
|
| 92 |
+
"How accurately can the model generate medical images like X-rays or MRI scans from brief descriptions?",
|
| 93 |
+
"How well can the model generate 3D visualizations of internal organs or surgical procedures?",
|
| 94 |
+
"Can the model handle complex, culturally specific objects, symbols, or rituals?",
|
| 95 |
+
"Can the model generate precise visual representations of complex mathematical graphs or formulas?",
|
| 96 |
+
"How accurately does the model generate scientific diagrams like molecular structures or physics simulations?",
|
| 97 |
+
"How accurately can the model recreate historical scenes, objects, or clothing based on text inputs?",
|
| 98 |
+
"Can the model accurately depict diverse cultural or historical elements?",
|
| 99 |
+
"How well can the model recreate a crime scene based on textual descriptions or evidence?",
|
| 100 |
+
"How accurately does the model generate facial composites based on a witness’s description?",
|
| 101 |
+
"Can the model generate anime characters?",
|
| 102 |
+
"How precisely can the model replicate specific art styles, such as pixel art or 3D-rendered graphics?",
|
| 103 |
+
"How well does the model interpret perspective and depth in a scene?",
|
| 104 |
+
"Can the model generate geometric and texture effects?",
|
| 105 |
+
"Can the model generate game characters with intricate details like armor or facial expressions?",
|
| 106 |
+
"Can the model generate objects with highly structured textures, such as accurately rendering five fingers on a hand?",
|
| 107 |
+
"Can the model accurately generate fashion sketches with fine detailing of fabrics and textures?",
|
| 108 |
+
"Can the model generate complex storyboards incorporating multiple objects, characters, and settings in one scene?",
|
| 109 |
+
"How accurately can the model generate detailed room layouts with attention to furniture, textures, and color schemes?",
|
| 110 |
+
"Can the model handle fine detailing of various materials like wood, metal, or fabric in generated interiors?",
|
| 111 |
+
"How well can the model represent subtle textures such as skin, fabric, or foliage?"
|
| 112 |
+
]
|
| 113 |
+
},
|
| 114 |
+
"General/Specific": {
|
| 115 |
+
"General": [
|
| 116 |
+
"How well can the model visualize my ideas based on my words?",
|
| 117 |
+
"How aligned is the generated content with the provided description?",
|
| 118 |
+
"Does the model understand specific terms, objects, and intentions?",
|
| 119 |
+
"How well does the model respond to ambiguous prompts? Does it handle missing or vague information effectively?",
|
| 120 |
+
"How closely does the generated output correspond to the prompts provided?",
|
| 121 |
+
"Does the generated result match the description?",
|
| 122 |
+
"Can the model maintain style consistency across multiple generations from the same prompt?",
|
| 123 |
+
"How well does the model distinguish between subtle variations in style (e.g., modern vs. postmodern art)?",
|
| 124 |
+
"What style does the model tend to generate most often?",
|
| 125 |
+
"Does the model adapt to different stylistic instructions with accuracy and consistency?",
|
| 126 |
+
"How precisely can the user specify object relationships?",
|
| 127 |
+
"How well can the model maintain object proportions and spatial relationships in complex scenes?",
|
| 128 |
+
"How effectively does the model handle changes in perspective?",
|
| 129 |
+
"Does the model demonstrate flexibility in generating different compositions from similar prompts?",
|
| 130 |
+
"How precisely can I control the generative model?",
|
| 131 |
+
"Does the model tend to simplify complex objects or details, or does it retain high levels of intricacy?",
|
| 132 |
+
"Can the model adjust its output when given slight variations of the same prompt (e.g., changes in tone or context)?",
|
| 133 |
+
"How well does the model handle varying levels of detail in its outputs?",
|
| 134 |
+
"Does the model generate similar outputs when there are only small changes in the prompts?",
|
| 135 |
+
"How well does the model balance contrasting elements, such as light and dark or complex and simple?",
|
| 136 |
+
"To what extent can the model produce photorealistic and aesthetically pleasing results?",
|
| 137 |
+
"Can the model generate realistic images without common AI-generated artifacts?",
|
| 138 |
+
"Can it generate physically plausible results?",
|
| 139 |
+
"How realistic is the generated content?",
|
| 140 |
+
"Can it generate results that are as realistic as possible?",
|
| 141 |
+
"How real are the generated results?",
|
| 142 |
+
"Can the model generate high-fidelity content?",
|
| 143 |
+
"Can the model generate real-life scenes?",
|
| 144 |
+
"Can the model maintain high image quality when generating large or detailed scenes?",
|
| 145 |
+
"Does the model tend to generate more photorealistic images or stylized, cartoonish ones?",
|
| 146 |
+
"Can the model mimic camera effects such as zooming, panning, or focus pulls, especially when not explicitly described in the prompt?",
|
| 147 |
+
"How well can the model balance aesthetic elements like space, flow, and symmetry in a room setting?",
|
| 148 |
+
"Can the model be effectively applied in different scenarios, such as varying lighting conditions and environments?",
|
| 149 |
+
"How well can the model handle lighting and movement?",
|
| 150 |
+
"How effectively can the model simulate real-world lighting effects for interior and exterior spaces?",
|
| 151 |
+
"Can the model generate high-fidelity visualizations for natural phenomena like fluid dynamics or planetary orbits?",
|
| 152 |
+
"How well does the model handle dynamic elements like action or motion?",
|
| 153 |
+
"How well can the model interpret abstract or metaphorical concepts? Does it translate non-literal ideas into visual form effectively?",
|
| 154 |
+
"How well can the model present concepts to explore creative possibilities?",
|
| 155 |
+
"Can the model blend different genres or themes successfully?",
|
| 156 |
+
"How well does the model balance realism with creativity? Can it generate imaginative scenes while maintaining visual plausibility?",
|
| 157 |
+
"How well does the model capture the mood or atmosphere of a given prompt?",
|
| 158 |
+
"How effectively can the model represent non-visual concepts, such as emotions or abstract ideas?",
|
| 159 |
+
"How does the model handle the depiction of abstract concepts or metaphors in video form? For example, can it animate a 'storm of emotions' or 'time flowing like water'?",
|
| 160 |
+
"Can the model handle contradictory visual cues in a prompt? For example, can it create a 'sunset during a snowstorm' or a 'frozen ocean with waves'?",
|
| 161 |
+
"How well can the model integrate symbolic or surreal elements, like a tree growing out of someone's hand, while keeping the scene coherent and natural?",
|
| 162 |
+
"Can the model render entirely fictional creatures or physics-defying actions like characters walking on walls or gravity reversing?",
|
| 163 |
+
"Can the model generate content that evolve dynamically, where the scene or characters change in response to the user's previous inputs?",
|
| 164 |
+
"How well can the model interpret narrative-driven prompts where characters are engaging in emotionally complex interactions without explicit action descriptors?",
|
| 165 |
+
"How does the model handle unusual or impossible lighting conditions, like 'darkness lit only by the glow of thoughts' or 'a world where shadows are brighter than light'?",
|
| 166 |
+
"Can the model generate variations of existing artwork while maintaining the original style?",
|
| 167 |
+
"Is the generated content sufficiently diverse, including rare or niche fields?",
|
| 168 |
+
"How well the model can generate a specific number of objects?",
|
| 169 |
+
"Does the model tend to overuse specific colors or textures, or does it offer diverse visual outputs?",
|
| 170 |
+
"Does the model generate diverse results for the same prompt, or does it repeat patterns?",
|
| 171 |
+
"Can the model generate images that visually suggest different stages of a continuous event or process (e.g., sunrise to sunset progression)?",
|
| 172 |
+
"How does the model handle prompts with explicit restrictions, like generating an image ‘with no use of circular shapes’ or ‘without blue hues’?",
|
| 173 |
+
"How quickly can the model generate a 3–5-second video with a specific theme?",
|
| 174 |
+
"Can the model differentiate the relationships of characters in the prompts?",
|
| 175 |
+
"How well does the model handle varying levels of detail in its outputs?"
|
| 176 |
+
],
|
| 177 |
+
"Specific": [
|
| 178 |
+
"How well can the model generate realistic scenes for a sci-fi film?",
|
| 179 |
+
"How effectively can the model manage physical realism, such as lighting, shadows, and accurate human anatomy?",
|
| 180 |
+
"Can the model generate detailed game environments such as forests, cities, or futuristic settings?",
|
| 181 |
+
"Can the model create videos with non-linear narratives, such as time jumps or reverse chronology? Can it handle multiple timelines that converge or diverge?",
|
| 182 |
+
"Can the model generate flowcharts effectively?",
|
| 183 |
+
"How well can the model generate presentation slides?",
|
| 184 |
+
"Can the model generate clear, courtroom-friendly diagrams to illustrate complex legal cases?",
|
| 185 |
+
"How well can the model generate accurate, scalable floor plans or blueprints?",
|
| 186 |
+
"Can the model generate structurally sound designs based on vague architectural concepts?",
|
| 187 |
+
"How effectively can the model visualize fictional worlds or characters with great detail?",
|
| 188 |
+
"How well does the model interpret abstract emotional concepts into visual formats?",
|
| 189 |
+
"How accurately can the model depict human expressions or body language for psychological studies?",
|
| 190 |
+
"Can the model generate different animal anatomical structures?",
|
| 191 |
+
"Can the model generate detailed steps of surgical procedures that cannot be shown in typical surgery videos?",
|
| 192 |
+
"Can the model understand and apply photographic concepts like focal length, aperture, or ISO?",
|
| 193 |
+
"How accurately can the model generate medical images like X-rays or MRI scans from brief descriptions?",
|
| 194 |
+
"How well can the model generate 3D visualizations of internal organs or surgical procedures?",
|
| 195 |
+
"Can the model handle complex, culturally specific objects, symbols, or rituals?",
|
| 196 |
+
"Can the model generate precise visual representations of complex mathematical graphs or formulas?",
|
| 197 |
+
"How accurately does the model generate scientific diagrams like molecular structures or physics simulations?",
|
| 198 |
+
"How accurately can the model recreate historical scenes, objects, or clothing based on text inputs?",
|
| 199 |
+
"Can the model accurately depict diverse cultural or historical elements?",
|
| 200 |
+
"How well can the model recreate a crime scene based on textual descriptions or evidence?",
|
| 201 |
+
"How accurately does the model generate facial composites based on a witness’s description?",
|
| 202 |
+
"Can the model generate anime characters?",
|
| 203 |
+
"How precisely can the model replicate specific art styles, such as pixel art or 3D-rendered graphics?",
|
| 204 |
+
"How well does the model interpret perspective and depth in a scene?",
|
| 205 |
+
"Can the model generate geometric and texture effects?",
|
| 206 |
+
"Can the model generate game characters with intricate details like armor or facial expressions?",
|
| 207 |
+
"Can the model generate objects with highly structured textures, such as accurately rendering five fingers on a hand?",
|
| 208 |
+
"Can the model accurately generate fashion sketches with fine detailing of fabrics and textures?",
|
| 209 |
+
"Can the model generate complex storyboards incorporating multiple objects, characters, and settings in one scene?",
|
| 210 |
+
"How accurately can the model generate detailed room layouts with attention to furniture, textures, and color schemes?",
|
| 211 |
+
"Can the model handle fine detailing of various materials like wood, metal, or fabric in generated interiors?",
|
| 212 |
+
"How well can the model represent subtle textures such as skin, fabric, or foliage?",
|
| 213 |
+
"Can the model generate structurally sound designs based on vague architectural concepts?",
|
| 214 |
+
"Can the model blend or fuse different design elements (e.g., mix traditional and modern fashion elements in one outfit)?",
|
| 215 |
+
"Is the model capable of generating designs tailored to individual body types or preferences?",
|
| 216 |
+
"How well does the model handle creating looks for specific occasions, such as casual, formal, or streetwear?",
|
| 217 |
+
"How well can the model represent various fashion trends or styles based on vague descriptions?"
|
| 218 |
+
]
|
| 219 |
+
},
|
| 220 |
+
"Category": {
|
| 221 |
+
"No Category": [
|
| 222 |
+
"How well can the model visualize my ideas based on my words?",
|
| 223 |
+
"How aligned is the generated content with the provided description?",
|
| 224 |
+
"Does the model understand specific terms, objects, and intentions?",
|
| 225 |
+
"How well does the model respond to ambiguous prompts? Does it handle missing or vague information effectively?",
|
| 226 |
+
"How closely does the generated output correspond to the prompts provided?",
|
| 227 |
+
"Does the generated result match the description?",
|
| 228 |
+
"Can the model maintain style consistency across multiple generations from the same prompt?",
|
| 229 |
+
"How well does the model distinguish between subtle variations in style (e.g., modern vs. postmodern art)?",
|
| 230 |
+
"What style does the model tend to generate most often?",
|
| 231 |
+
"Does the model adapt to different stylistic instructions with accuracy and consistency?",
|
| 232 |
+
"How precisely can the user specify object relationships?",
|
| 233 |
+
"How well can the model maintain object proportions and spatial relationships in complex scenes?",
|
| 234 |
+
"How effectively does the model handle changes in perspective?",
|
| 235 |
+
"Does the model demonstrate flexibility in generating different compositions from similar prompts?",
|
| 236 |
+
"How precisely can I control the generative model?",
|
| 237 |
+
"Does the model tend to simplify complex objects or details, or does it retain high levels of intricacy?",
|
| 238 |
+
"Can the model adjust its output when given slight variations of the same prompt (e.g., changes in tone or context)?",
|
| 239 |
+
"How well does the model handle varying levels of detail in its outputs?",
|
| 240 |
+
"Does the model generate similar outputs when there are only small changes in the prompts?",
|
| 241 |
+
"How well does the model balance contrasting elements, such as light and dark or complex and simple?",
|
| 242 |
+
"To what extent can the model produce photorealistic and aesthetically pleasing results?",
|
| 243 |
+
"Can the model generate realistic images without common AI-generated artifacts?",
|
| 244 |
+
"Can it generate physically plausible results?",
|
| 245 |
+
"How realistic is the generated content?",
|
| 246 |
+
"Can it generate results that are as realistic as possible?",
|
| 247 |
+
"How real are the generated results?",
|
| 248 |
+
"Can the model generate high-fidelity content?",
|
| 249 |
+
"Can the model generate real-life scenes?",
|
| 250 |
+
"Can the model maintain high image quality when generating large or detailed scenes?",
|
| 251 |
+
"Does the model tend to generate more photorealistic images or stylized, cartoonish ones?",
|
| 252 |
+
"Can the model mimic camera effects such as zooming, panning, or focus pulls, especially when not explicitly described in the prompt?",
|
| 253 |
+
"How well can the model balance aesthetic elements like space, flow, and symmetry in a room setting?",
|
| 254 |
+
"Can the model be effectively applied in different scenarios, such as varying lighting conditions and environments?",
|
| 255 |
+
"How well can the model handle lighting and movement?",
|
| 256 |
+
"How effectively can the model simulate real-world lighting effects for interior and exterior spaces?",
|
| 257 |
+
"Can the model generate high-fidelity visualizations for natural phenomena like fluid dynamics or planetary orbits?",
|
| 258 |
+
"How well does the model handle dynamic elements like action or motion?",
|
| 259 |
+
"How well can the model interpret abstract or metaphorical concepts? Does it translate non-literal ideas into visual form effectively?",
|
| 260 |
+
"How well can the model present concepts to explore creative possibilities?",
|
| 261 |
+
"Can the model blend different genres or themes successfully?",
|
| 262 |
+
"How well does the model balance realism with creativity? Can it generate imaginative scenes while maintaining visual plausibility?",
|
| 263 |
+
"How well does the model capture the mood or atmosphere of a given prompt?",
|
| 264 |
+
"How effectively can the model represent non-visual concepts, such as emotions or abstract ideas?",
|
| 265 |
+
"How does the model handle the depiction of abstract concepts or metaphors in video form? For example, can it animate a 'storm of emotions' or 'time flowing like water'?",
|
| 266 |
+
"Can the model handle contradictory visual cues in a prompt? For example, can it create a 'sunset during a snowstorm' or a 'frozen ocean with waves'?",
|
| 267 |
+
"How well can the model integrate symbolic or surreal elements, like a tree growing out of someone's hand, while keeping the scene coherent and natural?",
|
| 268 |
+
"Can the model render entirely fictional creatures or physics-defying actions like characters walking on walls or gravity reversing?",
|
| 269 |
+
"Can the model generate content that evolve dynamically, where the scene or characters change in response to the user's previous inputs?",
|
| 270 |
+
"How well can the model interpret narrative-driven prompts where characters are engaging in emotionally complex interactions without explicit action descriptors?",
|
| 271 |
+
"How does the model handle unusual or impossible lighting conditions, like 'darkness lit only by the glow of thoughts' or 'a world where shadows are brighter than light'?",
|
| 272 |
+
"Can the model generate variations of existing artwork while maintaining the original style?",
|
| 273 |
+
"Is the generated content sufficiently diverse, including rare or niche fields?",
|
| 274 |
+
"How well the model can generate a specific number of objects?",
|
| 275 |
+
"Does the model tend to overuse specific colors or textures, or does it offer diverse visual outputs?",
|
| 276 |
+
"Does the model generate diverse results for the same prompt, or does it repeat patterns?",
|
| 277 |
+
"Can the model generate images that visually suggest different stages of a continuous event or process (e.g., sunrise to sunset progression)?",
|
| 278 |
+
"How does the model handle prompts with explicit restrictions, like generating an image ‘with no use of circular shapes’ or ‘without blue hues’?",
|
| 279 |
+
"How quickly can the model generate a 3–5-second video with a specific theme?",
|
| 280 |
+
"Can the model differentiate the relationships of characters in the prompts?",
|
| 281 |
+
"How well does the model handle varying levels of detail in its outputs?"
|
| 282 |
+
],
|
| 283 |
+
"Film and Entertainment": [
|
| 284 |
+
"How well can the model generate realistic scenes for a sci-fi film?",
|
| 285 |
+
"Can the model create videos with non-linear narratives, such as time jumps or reverse chronology? Can it handle multiple timelines that converge or diverge?",
|
| 286 |
+
"How effectively can the model visualize fictional worlds or characters with great detail?",
|
| 287 |
+
"Can the model understand and apply photographic concepts like focal length, aperture, or ISO?",
|
| 288 |
+
"Can the model generate complex storyboards incorporating multiple objects, characters, and settings in one scene?"
|
| 289 |
+
],
|
| 290 |
+
"Medical": [
|
| 291 |
+
"How effectively can the model manage physical realism, such as lighting, shadows, and accurate human anatomy?",
|
| 292 |
+
"How well does the model interpret abstract emotional concepts into visual formats?",
|
| 293 |
+
"How accurately can the model depict human expressions or body language for psychological studies?",
|
| 294 |
+
"Can the model generate different animal anatomical structures?",
|
| 295 |
+
"Can the model generate detailed steps of surgical procedures that cannot be shown in typical surgery videos?",
|
| 296 |
+
"How accurately can the model generate medical images like X-rays or MRI scans from brief descriptions?",
|
| 297 |
+
"How well can the model generate 3D visualizations of internal organs or surgical procedures?",
|
| 298 |
+
"Can the model generate objects with highly structured textures, such as accurately rendering five fingers on a hand?"
|
| 299 |
+
],
|
| 300 |
+
"Game Design": [
|
| 301 |
+
"Can the model generate detailed game environments such as forests, cities, or futuristic settings?",
|
| 302 |
+
"Can the model generate anime characters?",
|
| 303 |
+
"How precisely can the model replicate specific art styles, such as pixel art or 3D-rendered graphics?",
|
| 304 |
+
"How well does the model interpret perspective and depth in a scene?",
|
| 305 |
+
"Can the model generate geometric and texture effects?",
|
| 306 |
+
"Can the model generate game characters with intricate details like armor or facial expressions?"
|
| 307 |
+
],
|
| 308 |
+
"Science and Education": [
|
| 309 |
+
"Can the model generate flowcharts effectively?",
|
| 310 |
+
"How well can the model generate presentation slides?",
|
| 311 |
+
"Can the model generate precise visual representations of complex mathematical graphs or formulas?",
|
| 312 |
+
"How accurately does the model generate scientific diagrams like molecular structures or physics simulations?"
|
| 313 |
+
],
|
| 314 |
+
"Law": [
|
| 315 |
+
"Can the model generate clear, courtroom-friendly diagrams to illustrate complex legal cases?",
|
| 316 |
+
"How well can the model recreate a crime scene based on textual descriptions or evidence?",
|
| 317 |
+
"How accurately does the model generate facial composites based on a witness’s description?"
|
| 318 |
+
],
|
| 319 |
+
"Architecture and Interior Design": [
|
| 320 |
+
"How well can the model generate accurate, scalable floor plans or blueprints?",
|
| 321 |
+
"Can the model generate structurally sound designs based on vague architectural concepts?",
|
| 322 |
+
"How accurately can the model generate detailed room layouts with attention to furniture, textures, and color schemes?",
|
| 323 |
+
"Can the model handle fine detailing of various materials like wood, metal, or fabric in generated interiors?",
|
| 324 |
+
"Can the model generate structurally sound designs based on vague architectural concepts?"
|
| 325 |
+
],
|
| 326 |
+
"History and Culture": [
|
| 327 |
+
"Can the model handle complex, culturally specific objects, symbols, or rituals?",
|
| 328 |
+
"How accurately can the model recreate historical scenes, objects, or clothing based on text inputs?",
|
| 329 |
+
"Can the model accurately depict diverse cultural or historical elements?"
|
| 330 |
+
],
|
| 331 |
+
"Fashion": [
|
| 332 |
+
"Can the model accurately generate fashion sketches with fine detailing of fabrics and textures?",
|
| 333 |
+
"How well can the model represent subtle textures such as skin, fabric, or foliage?",
|
| 334 |
+
"Can the model blend or fuse different design elements (e.g., mix traditional and modern fashion elements in one outfit)?",
|
| 335 |
+
"Is the model capable of generating designs tailored to individual body types or preferences?",
|
| 336 |
+
"How well does the model handle creating looks for specific occasions, such as casual, formal, or streetwear?",
|
| 337 |
+
"How well can the model represent various fashion trends or styles based on vague descriptions?"
|
| 338 |
+
]
|
| 339 |
+
}
|
| 340 |
+
}
|
debug_model_output.py
ADDED
|
@@ -0,0 +1,229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Debug script to analyze model output issues and test structured generation.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import requests
|
| 8 |
+
import argparse
|
| 9 |
+
from typing import List, Dict, Any, Optional
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def call_model(message: str, model_url: str = "http://0.0.0.0:12333/v1/chat/completions",
               model_name: str = "eval-agent", system: str = "", temperature: float = 0.1,
               max_tokens: int = 512, timeout: float = 60) -> Optional[str]:
    """Call an OpenAI-compatible chat endpoint and return the assistant reply.

    Args:
        message: User message to send.
        model_url: Full URL of the /v1/chat/completions endpoint.
        model_name: Model identifier placed in the request payload.
        system: Optional system prompt; omitted from the conversation when empty.
        temperature: Sampling temperature (low values favor deterministic output).
        max_tokens: Maximum number of tokens the server may generate.
        timeout: Request timeout in seconds (previously hard-coded to 60).

    Returns:
        The assistant message content, or ``None`` on any request/parse
        failure (the error is printed so the debug session can continue).
    """
    messages = []
    if system:
        messages.append({"role": "system", "content": system})
    messages.append({"role": "user", "content": message})

    payload = {
        "model": model_name,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "stream": False
    }

    try:
        response = requests.post(model_url, json=payload, timeout=timeout)
        response.raise_for_status()
        result = response.json()
        return result["choices"][0]["message"]["content"]
    except (requests.RequestException, ValueError, KeyError, IndexError) as e:
        # Narrowed from a blanket `except Exception`: network/HTTP failures,
        # malformed JSON, or an unexpected response shape are the realistic
        # failure modes of this call.
        print(f"Error: {e}")
        return None
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def test_structured_output():
    """Run canned prompts against the model and report tag-structure health.

    Each test case exercises a different level of prompt strictness; every
    response is checked for matched <think>/<subaspect>/<tool> tag pairs.
    """
    print("🔍 DEBUGGING MODEL STRUCTURED OUTPUT")
    print("="*60)

    # Test cases with different complexity levels
    test_cases = [
        {
            "name": "Simple Structure Test",
            "prompt": "Please respond with: <think>test thought</think> <subaspect>test aspect</subaspect> <tool>test tool</tool>",
            "system": "",
            "temperature": 0.0
        },
        {
            "name": "VBench Format Test",
            "prompt": "How well does the model generate objects?",
            "system": "You must respond in this exact format: <think>your reasoning</think> <subaspect>specific aspect</subaspect> <tool>evaluation tool</tool>",
            "temperature": 0.1
        },
        {
            "name": "Training Data Example",
            "prompt": "How accurately does the model generate specific object classes as described in the text prompt?",
            "system": """You are an expert in evaluating video generation models. You must respond in this exact format:

<think>Your detailed reasoning about what to evaluate</think> <subaspect>The specific aspect to focus on</subaspect> <tool>Object Class</tool>

Available tools: Object Class, Scene, Color, Spatial Relationship, Human Action, Dynamic Degree, Multiple Objects, Overall Consistency, Aesthetic Quality, Imaging Quality, Motion Smoothness, Subject Consistency, Background Consistency""",
            "temperature": 0.0
        }
    ]

    for i, test in enumerate(test_cases, 1):
        print(f"\n{i}. {test['name']}")
        print("-" * 40)
        print(f"Prompt: {test['prompt'][:100]}...")
        print(f"Temperature: {test['temperature']}")

        response = call_model(
            message=test['prompt'],
            system=test['system'],
            temperature=test['temperature']
        )

        if response:
            print(f"Response: {response}")

            # Analyze structure: a tag only counts when both open and close exist.
            has_think = "<think>" in response and "</think>" in response
            has_subaspect = "<subaspect>" in response and "</subaspect>" in response
            has_tool = "<tool>" in response and "</tool>" in response
            all_present = has_think and has_subaspect and has_tool

            # FIX: the status emoji now reflects the actual boolean instead of
            # unconditionally printing a checkmark for every line.
            print(f"Structure Analysis:")
            print(f"  {'✅' if has_think else '❌'} Has <think> tags: {has_think}")
            print(f"  {'✅' if has_subaspect else '❌'} Has <subaspect> tags: {has_subaspect}")
            print(f"  {'✅' if has_tool else '❌'} Has <tool> tags: {has_tool}")
            print(f"  {'✅' if all_present else '❌'} All tags present: {all_present}")

            # Check for common formatting errors
            errors = []
            if "<think>" in response and "</tool>" in response and "</think>" not in response:
                errors.append("Missing </think> closing tag")
            if "Object Class</tool>" in response:
                errors.append("Tool name in wrong tag")
            if len([tag for tag in ["<think>", "<subaspect>", "<tool>"] if tag in response]) != len([tag for tag in ["</think>", "</subaspect>", "</tool>"] if tag in response]):
                errors.append("Mismatched opening/closing tags")

            if errors:
                print(f"  ❌ Errors found: {', '.join(errors)}")
            else:
                # FIX: previously there was no feedback when the response was
                # clean, leaving the analysis output ambiguous.
                print("  ✅ No formatting errors detected")
        else:
            print("❌ No response received")
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def test_temperature_effects():
    """Probe how sampling temperature affects structured-output quality."""
    print("\n\n🌡️ TEMPERATURE EFFECTS ON STRUCTURED OUTPUT")
    print("="*60)

    query = "How accurately does the model generate specific object classes?"
    format_hint = "Respond in format: <think>reasoning</think> <subaspect>aspect</subaspect> <tool>Object Class</tool>"

    for temp in (0.0, 0.1, 0.3, 0.7, 1.0):
        print(f"\nTemperature: {temp}")
        print("-" * 30)

        reply = call_model(
            message=query,
            system=format_hint,
            temperature=temp,
            max_tokens=200
        )

        if not reply:
            print("❌ No response")
            continue

        print(f"Response: {reply[:150]}...")

        # Structure counts as correct only when every tag pair is present.
        well_formed = all(
            f"<{tag}>" in reply and f"</{tag}>" in reply
            for tag in ("think", "subaspect", "tool")
        )
        print(f"Correct structure: {'✅' if well_formed else '❌'}")
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def analyze_training_sample(data_path: str = "data/postprocess_20250819/ea_cot_dataset_10k.json"):
    """Compare the model's reply to the first training sample in *data_path*.

    Loads the SFT dataset, replays its first instruction (and system prompt,
    if present) at temperature 0, and reports whether the model reproduces the
    expected <think>/<subaspect>/<tool> output.

    Args:
        data_path: Path to the JSON training dataset — a list of samples with
            'instruction', 'output' and (optionally) 'system' keys. The
            default preserves the previously hard-coded location.
    """
    print("\n\n📚 TRAINING DATA ANALYSIS")
    print("="*60)

    # Load a training sample
    try:
        with open(data_path, 'r') as f:
            data = json.load(f)

        sample = data[0]  # First sample

        print("Training Sample:")
        print(f"Instruction: {sample['instruction']}")
        print(f"Expected Output: {sample['output']}")

        # Test model with exact training example
        print("\n🧪 Testing with exact training example:")
        response = call_model(
            message=sample['instruction'],
            system=sample.get('system', ''),
            temperature=0.0
        )

        print(f"Model Response: {response}")

        # Compare model output against the dataset's reference output.
        expected = sample['output']
        if response and expected in response:
            print("✅ Model output matches training data!")
        else:
            print("❌ Model output differs from training data")

            # Detailed comparison of each tagged section.
            if response:
                print("\nDetailed Analysis:")
                print(f"Expected think: {expected[expected.find('<think>')+7:expected.find('</think>')][:50]}...")
                print(f"Expected subaspect: {expected[expected.find('<subaspect>')+11:expected.find('</subaspect>')]}")
                print(f"Expected tool: {expected[expected.find('<tool>')+6:expected.find('</tool>')]}")

                if '<think>' in response:
                    # An unterminated <think> block is the most common failure.
                    think_content = response[response.find('<think>')+7:response.find('</think>')] if '</think>' in response else "INCOMPLETE"
                    print(f"Actual think: {think_content[:50]}...")

    except Exception as e:
        print(f"Could not load training data: {e}")
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def main():
    """Entry point: verify server connectivity, then run every debug probe."""
    arg_parser = argparse.ArgumentParser(description="Debug model structured output issues")
    arg_parser.add_argument("--model_url", default="http://0.0.0.0:12333/v1/chat/completions")
    arg_parser.add_argument("--model_name", default="eval-agent")
    cli_args = arg_parser.parse_args()

    # A trivial round-trip confirms the server is reachable before the
    # longer probes run.
    print("🔗 Testing connection...")
    if not call_model("Hello", model_url=cli_args.model_url, model_name=cli_args.model_name):
        print("❌ Cannot connect to model server")
        return
    print("✅ Connected successfully!")

    # Run all tests
    test_structured_output()
    test_temperature_effects()
    analyze_training_sample()

    print("\n\n💡 RECOMMENDATIONS:")
    print("="*60)
    recommendations = (
        "Use temperature=0.0 or very low temperature for structured output",
        "Include explicit format instructions in system prompt",
        "Consider retraining with more structured output examples",
        "Add format validation in your evaluation pipeline",
        "Use constrained generation or parsing to fix malformed output",
    )
    for idx, tip in enumerate(recommendations, 1):
        print(f"{idx}. {tip}")


if __name__ == "__main__":
    main()
|
eval_agent/__init__.py
ADDED
|
File without changes
|
eval_agent/base_agent.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from openai import OpenAI
|
| 2 |
+
import json
|
| 3 |
+
# from vllm import LLM, SamplingParams
|
| 4 |
+
import requests
|
| 5 |
+
|
| 6 |
+
class BaseAgent:
    """Multi-turn chat agent backed by the OpenAI chat-completions API.

    Keeps an optional running conversation history, accumulates token usage,
    and can request and parse JSON-formatted responses.
    """

    def __init__(self, system_prompt="", use_history=True, temp=0.5, top_p=0.95):
        """Create the agent.

        Args:
            system_prompt: Optional system message prepended to the history.
            use_history: When True, every call sends the full conversation.
            temp: Sampling temperature.
            top_p: Nucleus-sampling cutoff.
        """
        self.use_history = use_history
        self.client = OpenAI()
        self.system = system_prompt
        self.temp = temp
        self.top_p = top_p
        self.input_tokens_count = 0
        self.output_tokens_count = 0
        self.messages = []
        if self.system:
            self.messages.append({"role": "system", "content": system_prompt})

    def __call__(self, message, parse=False):
        """Send *message*, record the exchange, and optionally parse JSON.

        Raises:
            Exception: when *parse* is True and the reply is not valid JSON;
                the offending content is included in the message.
        """
        self.messages.append({"role": "user", "content": message})
        result = self.generate(message, parse)
        self.messages.append({"role": "assistant", "content": result})

        if parse:
            try:
                result = self.parse_json(result)
            except Exception as e:
                # FIX: was a bare `except:` that discarded the original decode
                # error and passed the message as a 2-tuple; chain the cause
                # and format the message as a single string.
                raise Exception(f"Error content is list below:\n{result}") from e

        return result

    def generate(self, message, json_format):
        """Run one chat completion; request JSON mode when *json_format* is true."""
        if self.use_history:
            input_messages = self.messages
        else:
            input_messages = [
                {"role": "system", "content": self.system},
                {"role": "user", "content": message}
            ]

        # The two original branches differed only in response_format, so the
        # call is built once and the extra argument added conditionally.
        kwargs = {
            "model": "gpt-4o-2024-08-06",  # gpt-4
            "messages": input_messages,
            "temperature": self.temp,
            "top_p": self.top_p,
        }
        if json_format:
            kwargs["response_format"] = {"type": "json_object"}
        response = self.client.chat.completions.create(**kwargs)
        self.update_tokens_count(response)
        return response.choices[0].message.content

    def parse_json(self, response):
        """Decode a JSON string into Python objects."""
        return json.loads(response)

    def add(self, message: dict):
        """Append a pre-built message dict to the conversation history."""
        self.messages.append(message)

    def update_tokens_count(self, response):
        """Accumulate prompt/completion token usage from an API response."""
        self.input_tokens_count += response.usage.prompt_tokens
        self.output_tokens_count += response.usage.completion_tokens

    def show_usage(self):
        """Print cumulative token usage."""
        print(f"Total input tokens used: {self.input_tokens_count}\nTotal output tokens used: {self.output_tokens_count}")
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class BaseAgent_SFT:
    """Chat agent for an SFT model served over HTTP or loaded locally via vLLM.

    When *model_name_or_path* is an http(s) URL the agent posts to an
    OpenAI-compatible endpoint; otherwise it loads the model with vLLM.
    """

    def __init__(self, system_prompt="", use_history=True, temp=0, top_p=1, model_name_or_path="http://0.0.0.0:12333/v1/chat/completions"):
        self.use_history = use_history
        if not model_name_or_path.startswith("http"):
            # FIX: the module-level `from vllm import LLM, SamplingParams` is
            # commented out, so this branch previously raised NameError.
            # Importing lazily keeps vllm optional for API-only users.
            from vllm import LLM
            self.client = LLM(model=model_name_or_path, tokenizer=model_name_or_path, gpu_memory_utilization=0.5, tensor_parallel_size=1)
            self.api = False
        else:
            self.client = model_name_or_path
            self.model_name = "eval-agent"
            self.api = True
        self.system = system_prompt
        self.temp = temp
        self.top_p = top_p
        self.input_tokens_count = 0
        self.output_tokens_count = 0
        self.messages = []
        if self.system:
            self.messages.append({"role": "system", "content": system_prompt})

    def __call__(self, message):
        """Send *message* and record both sides of the exchange in history."""
        self.messages.append({"role": "user", "content": message})
        result = self.generate(message)
        self.messages.append({"role": "assistant", "content": result})

        return result

    def generate(self, message):
        """Produce one completion via the HTTP API (with 3 retries) or vLLM.

        Returns:
            The generated text, or ``None`` when all API attempts fail.
        """
        if self.use_history:
            input_messages = self.messages
        else:
            input_messages = [
                {"role": "system", "content": self.system},
                {"role": "user", "content": message}
            ]

        if self.api:
            payload = {
                "model": self.model_name,
                "messages": input_messages,
                "max_tokens": 1024,
                "temperature": self.temp,
                "top_p": self.top_p,
                "stream": False
            }

            # Retry up to three times on transport or format errors.
            for _ in range(3):
                try:
                    response = requests.post(self.client, json=payload, timeout=120)
                    response.raise_for_status()
                    result = response.json()
                    return result["choices"][0]["message"]["content"]

                except requests.exceptions.RequestException as e:
                    print(f"❌ API request failed: {e}")
                    continue

                except (KeyError, IndexError) as e:
                    print(f"❌ Unexpected response format: {e}")
                    continue
            return None
        else:
            from vllm import SamplingParams
            # NOTE(review): chat-style message dicts are passed straight to
            # LLM.generate here — verify against the installed vLLM API
            # (LLM.chat may be the intended call).
            response = self.client.generate(
                input_messages,
                sampling_params=SamplingParams(
                    max_tokens=1024,
                    temperature=self.temp,
                    top_p=self.top_p,
                    n=1,
                ),
            )
            return response[0].outputs[0].text
|
| 154 |
+
|
| 155 |
+
class BaseAgent_Open:
    """Chat agent backed by a locally loaded open-weights model via vLLM."""

    def __init__(self, system_prompt="", use_history=True, temp=0, top_p=1, model_name_or_path="Qwen/Qwen2.5-3B-Instruct"):
        # FIX: the module-level vllm import is commented out, so referencing
        # LLM here previously raised NameError; import it lazily instead.
        from vllm import LLM
        self.use_history = use_history
        self.client = LLM(model=model_name_or_path, tokenizer=model_name_or_path, gpu_memory_utilization=0.5, tensor_parallel_size=1)
        self.tokenizer = self.client.get_tokenizer()
        self.system = system_prompt
        self.temp = temp
        self.top_p = top_p
        self.messages = []
        if self.system:
            self.messages.append({"role": "system", "content": system_prompt})

    def __call__(self, message):
        """Send *message* and record both sides of the exchange in history."""
        self.messages.append({"role": "user", "content": message})
        result = self.generate(message)
        self.messages.append({"role": "assistant", "content": result})

        return result

    def generate(self, message):
        """Render the conversation through the chat template and sample once."""
        from vllm import SamplingParams
        if self.use_history:
            input_messages = self.messages
        else:
            input_messages = [
                {"role": "system", "content": self.system},
                {"role": "user", "content": message}
            ]

        # Convert messages to a prompt string using the tokenizer's chat template
        prompt = self.tokenizer.apply_chat_template(
            input_messages,
            tokenize=False,
            add_generation_prompt=True
        )

        response = self.client.generate(
            prompt,
            sampling_params=SamplingParams(
                max_tokens=1024,
                temperature=self.temp,
                top_p=self.top_p,
                n=1,
            ),
        )
        return response[0].outputs[0].text
|
| 202 |
+
|
eval_agent/check_query_completeness.py
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Script to check if each query has at least 10 eval_results.json files
|
| 4 |
+
across all subfolders (numbered and supplementary) in VBench evaluation results.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
import json
|
| 9 |
+
import re
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from typing import Dict, List, Set
|
| 12 |
+
from collections import defaultdict
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# The 15 VBench dimension queries that every model run is expected to cover;
# completeness checks below compare discovered result folders against this list.
queries_to_evaluate = [
    "How does the model perform in terms of aesthetics?",
    "How well does the model ensure that the subject maintains a consistent appearance throughout the video?",
    "How effectively does the model maintain a consistent background scene throughout the video?",
    "How well does the model produce smooth and natural motion that follows the physical laws of the real world?",
    "To what extent are distortions like over-exposure, noise, and blur present in the generated frames?",
    "How consistently does the visual style (e.g., oil painting, black and white, watercolor) align with the specified look throughout the video?",
    "How consistent are the time-based effects and camera motions throughout the video?",
    "How well does the generated video demonstrate overall consistency with the input prompt?",
    "How effectively does the model generate multiple distinct objects in a single scene?",
    "How accurately does the model generate specific object classes as described in the text prompt?",
    "To what extent does the video exhibit dynamic movement rather than being overly static?",
    "How accurately do human subjects in the video perform the actions described in the text prompt?",
    "How accurately do the colors of the generated objects match the specifications in the text prompt?",
    "How accurately does the spatial arrangement of objects reflect the positioning and relationships described in the text prompt?",
    "How accurately does the generated video represent the scene described in the text prompt?",
]
|
| 32 |
+
|
| 33 |
+
def extract_query_from_folder_name(folder_name: str) -> str:
    """Recover the evaluation query encoded in a results folder name.

    Folder names typically look like ``<date>-HH:MM:SS-query_with_underscores``;
    underscores stand in for spaces and the trailing '?' may be absent. Folders
    without a timestamp are treated as the query itself when they contain an
    underscore or space. Returns None when no query can be extracted.
    """
    timestamped = re.search(r'\d{2}:\d{2}:\d{2}-(.+)', folder_name)
    if timestamped:
        candidate = timestamped.group(1)
    elif '_' in folder_name or ' ' in folder_name:
        # No timestamp prefix: assume the folder name itself encodes the query.
        candidate = folder_name
    else:
        return None

    query = candidate.replace('_', ' ')
    return query if query.endswith('?') else query + '?'
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def count_query_occurrences(model_path: str, min_required: int = 10, expected_queries: List[str] = None) -> Dict[str, Dict]:
    """
    Count how many eval_results.json files exist for each query across all subfolders.
    Recursively searches all directories for eval_results.json files.

    Args:
        model_path: Path to the model folder (e.g., eval_vbench_results/modelscope)
        min_required: Minimum number of eval_results.json files required per query (default: 10)
        expected_queries: Optional list of queries that must appear; any that are
            never found on disk are reported in 'missing_completely'.

    Returns:
        Dictionary with query statistics and missing queries
    """
    model_path = Path(model_path)

    if not model_path.exists():
        print(f"Error: Path {model_path} does not exist!")
        return {}

    # Track query occurrences: query -> list of relative paths where it exists
    query_occurrences = defaultdict(list)

    # Track all unique queries found
    all_queries = set()

    print(f"Scanning model folder: {model_path.name}")
    print("=" * 80)

    # Find all eval_results.json files recursively
    eval_files = list(model_path.rglob("eval_results.json"))
    print(f"Found {len(eval_files)} eval_results.json files")

    # Process each eval_results.json file
    for eval_file in eval_files:
        # Get the parent folder that contains this eval_results.json
        parent_folder = eval_file.parent

        # Results may sit inside a "videos" subfolder; in that case the query
        # is encoded in the folder one level above.
        if parent_folder.name == "videos":
            query_folder = parent_folder.parent
        else:
            query_folder = parent_folder

        # Extract query from folder name
        query = extract_query_from_folder_name(query_folder.name)

        if query:
            all_queries.add(query)

            # Only count files that contain valid JSON
            try:
                with open(eval_file, 'r') as f:
                    json.load(f)

                # Get relative path from model folder
                relative_path = eval_file.relative_to(model_path)

                # Record this occurrence with the relative path
                query_occurrences[query].append(str(relative_path))

            except Exception as e:
                # FIX: was `except (json.JSONDecodeError, Exception)` — the
                # tuple was redundant since Exception already subsumes it.
                # Invalid files are reported but not counted.
                print(f"  Invalid JSON in {eval_file}: {e}")

    # Analyze results
    results = {
        'all_queries': sorted(all_queries),
        'query_counts': {},
        'insufficient_queries': [],
        'missing_completely': [],
        'statistics': {
            'total_unique_queries': len(all_queries),
            'queries_with_sufficient_results': 0,
            'queries_with_insufficient_results': 0,
            'queries_missing_completely': 0
        }
    }

    # If expected queries provided, check for completely missing ones
    if expected_queries:
        for query in expected_queries:
            if query not in all_queries:
                all_queries.add(query)
                results['missing_completely'].append(query)
                results['statistics']['queries_missing_completely'] += 1
                results['query_counts'][query] = {
                    'count': 0,
                    'locations': []
                }

    # Check each query
    for query in all_queries:
        if query not in results['missing_completely']:  # Skip if already marked as missing
            count = len(query_occurrences[query])
            results['query_counts'][query] = {
                'count': count,
                'locations': query_occurrences[query]
            }

            if count == 0:
                results['missing_completely'].append(query)
                results['statistics']['queries_missing_completely'] += 1
            elif count < min_required:
                results['insufficient_queries'].append({
                    'query': query,
                    'count': count,
                    'needed': min_required - count
                })
                results['statistics']['queries_with_insufficient_results'] += 1
            else:
                results['statistics']['queries_with_sufficient_results'] += 1

    # Update total unique queries count (expected queries may have been added)
    results['statistics']['total_unique_queries'] = len(all_queries)

    return results
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def save_insufficient_queries(results: Dict, output_file: str, min_required: int = 10):
    """Write under-evaluated queries to *output_file*, one line per needed run.

    A query with fewer than *min_required* results is written once for each
    missing evaluation; a completely missing query is written *min_required*
    times. Downstream tooling can therefore feed the file directly back into
    the evaluation loop.
    """
    insufficient = results.get('insufficient_queries', [])
    missing = results.get('missing_completely', [])

    # Build every output line first, then write in one pass.
    lines = []
    for item in insufficient:
        lines.extend([item['query']] * item['needed'])
    for query in missing:
        lines.extend([query] * min_required)

    with open(output_file, 'w') as f:
        f.writelines(line + '\n' for line in lines)

    print(f"\nSaved {len(lines)} query lines to {output_file}")
    print(f"(Queries needing multiple evaluations are repeated)")
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def generate_completeness_report(results: Dict, min_required: int = 10) -> str:
    """Build a human-readable completeness report from the audit results."""

    if not results:
        return "No results to report."

    out = []
    divider = "=" * 80
    thin = "-" * 80

    out.extend([divider,
                "QUERY COMPLETENESS REPORT",
                f"Minimum required eval_results.json files per query: {min_required}",
                divider])

    stats = results['statistics']
    out.append("\n📊 STATISTICS:")
    out.append(f" Total unique queries found: {stats['total_unique_queries']}")
    out.append(f" Queries with sufficient results (>={min_required}): {stats['queries_with_sufficient_results']}")
    out.append(f" Queries with insufficient results (<{min_required}): {stats['queries_with_insufficient_results']}")
    out.append(f" Queries missing completely: {stats['queries_missing_completely']}")

    # Queries that have some results but fewer than the required quota.
    if results['insufficient_queries']:
        out.extend(["\n" + thin, "⚠️ QUERIES WITH INSUFFICIENT RESULTS:", thin])
        for entry in results['insufficient_queries']:
            query = entry['query']
            out.append(f"\n Query: {query[:80]}...")
            out.append(f" Current count: {entry['count']}/{min_required} (needs {entry['needed']} more)")
            locations = results['query_counts'][query]['locations']
            # First path component identifies the round/subfolder of each hit.
            folder_names = [loc.split('/')[0] for loc in locations[:5]]
            out.append(f" Found in: {', '.join(folder_names)}")
            if len(locations) > 5:
                out.append(f" ... and {len(locations) - 5} more")

    # Queries with no results at all (listing capped at 10).
    if results['missing_completely']:
        out.extend(["\n" + thin, "❌ QUERIES MISSING COMPLETELY:", thin])
        for query in results['missing_completely'][:10]:
            out.append(f" • {query}")
        if len(results['missing_completely']) > 10:
            out.append(f" ... and {len(results['missing_completely']) - 10} more")

    # Histogram: how many queries have exactly k eval_results.json files.
    if results.get('query_counts'):
        out.extend(["\n" + thin, "📈 QUERY COUNT DISTRIBUTION:", thin])
        count_distribution = defaultdict(list)
        for query, data in results['query_counts'].items():
            count_distribution[data['count']].append(query)

        for count in sorted(count_distribution):
            bucket = count_distribution[count]
            out.append(f" {count} eval_results.json: {len(bucket)} queries")
            if count < min_required and len(bucket) <= 3:
                out.extend(f" - {q[:70]}..." for q in bucket)

    total_missing = sum(e['needed'] for e in results.get('insufficient_queries', [])) + \
                    len(results.get('missing_completely', [])) * min_required

    out.append("\n" + divider)
    out.append(f"SUMMARY: Need {total_missing} more evaluations to reach {min_required} per query")
    out.append(divider)

    return "\n".join(out)
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def main():
    """Parse CLI args, audit eval_results.json coverage, and emit reports.

    Returns 0 when every query has at least --min-required results, 1
    otherwise (suitable as a process exit code).
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="Check if each query has at least N eval_results.json files across all subfolders"
    )
    parser.add_argument(
        "path",
        type=str,
        help="Path to the model folder (e.g., eval_vbench_results/modelscope)"
    )
    parser.add_argument(
        "--min-required",
        type=int,
        default=10,
        help="Minimum number of eval_results.json files required per query (default: 10)"
    )
    parser.add_argument(
        "--output",
        type=str,
        help="Save report to file"
    )
    parser.add_argument(
        "--queries-output",
        type=str,
        default="queries_to_evaluate.txt",
        help="Save queries that need more evaluations to a text file (repeated as needed)"
    )

    args = parser.parse_args()

    print(f"Checking query completeness in: {args.path}")
    print(f"Minimum required results per query: {args.min_required}")
    print("-" * 80)

    # Count query occurrences with expected queries.
    # NOTE(review): `queries_to_evaluate` is not defined in this function or
    # its arguments — presumably a module-level list defined earlier in the
    # file; verify it exists, otherwise this raises NameError.
    results = count_query_occurrences(args.path, args.min_required, queries_to_evaluate)

    # Generate and print the human-readable report.
    report = generate_completeness_report(results, args.min_required)
    print("\n" + report)

    # Save report if requested (path is relative to the scanned folder).
    if args.output:
        output_path = Path(args.path) / args.output
        with open(output_path, 'w') as f:
            f.write(report)
        print(f"\nReport saved to: {output_path}")

    # Save queries that still need evaluations (only when any are pending).
    if args.queries_output and (results.get('insufficient_queries') or results.get('missing_completely')):
        output_path = Path(args.path) / args.queries_output
        save_insufficient_queries(results, output_path, args.min_required)

    # Exit code: non-zero when any query is under quota.
    insufficient_count = len(results.get('insufficient_queries', [])) + len(results.get('missing_completely', []))
    return 0 if insufficient_count == 0 else 1
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
if __name__ == "__main__":
    # Fix: use SystemExit rather than the site-provided exit() helper, which
    # is not guaranteed to exist (e.g. under `python -S` or in frozen builds).
    raise SystemExit(main())
|
eval_agent/eval_agent_for_t2i_compbench.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
import argparse
|
| 5 |
+
import Levenshtein
|
| 6 |
+
|
| 7 |
+
from base_agent import BaseAgent
|
| 8 |
+
from system_prompts import sys_prompts
|
| 9 |
+
from tools import ToolCalling
|
| 10 |
+
from process import *
|
| 11 |
+
import pandas as pd
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def parse_args():
    """Parse CLI arguments for the T2I-CompBench evaluation agent."""
    parser = argparse.ArgumentParser(description='Eval-Agent-T2I-CompBench', formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument(
        "--user_query",
        type=str,
        required=True,
        help="user query",
    )

    # Fix: dropped `required=True` here — it contradicted the default
    # ("sdxl-1" could never apply), and the sibling VBench script treats
    # --model as optional. Passing --model explicitly still works.
    parser.add_argument(
        "--model",
        type=str,
        default="sdxl-1",
        help="model",
    )

    args = parser.parse_args()
    return args
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def most_similar_string(prompt, string_list):
    """Return the entry of *string_list* with the smallest edit distance to *prompt*.

    Ties resolve to the earliest entry, matching min()'s first-wins behavior.
    """
    return min(string_list, key=lambda candidate: Levenshtein.distance(prompt, candidate))
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def check_and_fix_prompt(chosed_prompts, prompt_list):
    """Snap each designed prompt onto its closest entry in *prompt_list*.

    Entries are mutated in place ("Prompt" replaced) and returned under the
    same step keys.
    """
    fixed = {}
    for step, entry in chosed_prompts.items():
        entry["Prompt"] = most_similar_string(entry["Prompt"], prompt_list)
        fixed[step] = entry
    return fixed
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def format_dimension_as_string(df, dimension_name):
    """Render one dimension's score thresholds as a single summary line."""
    row = df.loc[df['Dimension'] == dimension_name]
    if row.empty:
        return f"No data found for dimension: {dimension_name}"

    def value_of(col):
        return row[col].values[0]

    levels = ["Very High", "High", "Moderate", "Low", "Very Low"]
    graded = ", ".join(f"{level} -> {value_of(level)}" for level in levels)
    return f"{value_of('Dimension')}: {graded}"
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class EvalAgent:
    """Agentic evaluation loop for T2I-CompBench.

    A planner agent picks a sub-aspect/tool, a prompt agent selects matching
    benchmark prompts, images are sampled and scored, and the numeric results
    are fed back to the planner until it emits a final analysis.
    """

    def __init__(self, sample_model="sdxl-1", save_mode="img", refer_file="t2i_comp_dimension_scores.tsv"):
        self.tools = ToolCalling(sample_model=sample_model, save_mode=save_mode)
        self.sample_model = sample_model
        self.user_query = ""
        # TSV mapping each tool/dimension to its score-category thresholds.
        self.tsv_file_path = refer_file

    def init_agent(self):
        """Create fresh prompt/plan agents (called once per explore run)."""
        self.prompt_agent = BaseAgent(system_prompt=sys_prompts["t2i-comp-prompt-sys"], use_history=False, temp=0.7)
        self.plan_agent = BaseAgent(system_prompt=sys_prompts["t2i-comp-plan-sys"], use_history=True, temp=0.7)

    def sample_and_eval(self, designed_prompts, save_path, tool_name):
        """Generate images for the designed prompts and score them with *tool_name*."""
        prompts = [item["Prompt"] for _, item in designed_prompts.items()]
        video_pairs = self.tools.sample(prompts, save_path)
        eval_results = self.tools.eval(tool_name, video_pairs)
        return eval_results

    def reference_prompt(self, search_dim):
        """Load the benchmark prompt list for *search_dim* ('_binding' suffix stripped)."""
        search_item = search_dim.replace("_binding", "")
        file_path = f"./eval_tools/t2i_comp/prompt_file/{search_item}_val.txt"

        with open(file_path, "r") as f:
            lines = [line.strip() for line in f.readlines()]

        return lines

    def format_eval_result(self, results, reference_table):
        """Format one round of eval results as the observation fed back to the planner."""
        question = results["Sub-aspect"]
        tool_name = results["Tool"]
        average_score = results["eval_results"]["score"][0]
        video_results = results["eval_results"]["score"][1]

        output = f"Sub-aspect: {question}\n"
        output += f"The score categorization table for the numerical results evaluated by the '{tool_name}' is as follows:\n{reference_table}\n\n"
        output += f"Observation: The evaluation results using '{tool_name}' are summarized below.\n"
        output += f"Average Score: {average_score:.4f}\n"
        output += "Detailed Results:\n"

        for i, video in enumerate(video_results, 1):
            output += f"\t{i}. Prompt: {video['prompt']}\n"
            output += f"\tScore: {video['image_results']:.4f}\n"

        return output

    def update_info(self):
        """Create the per-run results folder and derive output paths."""
        # NOTE(review): ':' in the timestamp is illegal in Windows paths —
        # confirm this only ever runs on POSIX systems.
        folder_name = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') + "-" + self.user_query.replace(" ", "_")
        self.save_path = f"./eval_t2i_comp_results/{self.sample_model}/{folder_name}"
        os.makedirs(self.save_path, exist_ok=True)

        self.image_folder = os.path.join(self.save_path, "images")
        self.file_name = os.path.join(self.save_path, "eval_results.json")

    def explore(self, query, all_chat=None):
        """Run the plan → prompt → sample/eval loop for *query* (max 11 rounds).

        Fix: `all_chat` previously used a mutable default ([]), so chat
        history leaked across successive explore() calls in one process.
        """
        if all_chat is None:
            all_chat = []

        self.user_query = query
        self.update_info()
        self.init_agent()
        df = pd.read_csv(self.tsv_file_path, sep='\t')

        plan_query = query
        all_chat.append(plan_query)

        n = 0
        while True:
            plans = self.plan_agent(plan_query, parse=True)
            # The planner signals completion by emitting an "Analysis" field.
            if plans.get("Analysis"):
                all_chat.append(plans)
                print("Finish!")
                break

            tool_name = plans["Tool"].lower().strip().replace(" ", "_")
            reference_table = format_dimension_as_string(df, plans["Tool"])

            prompt_query = json.dumps(plans)
            prompt_list = self.reference_prompt(tool_name)
            prompt_query = f"Context:\n{prompt_query}\n\nPrompt list:\n{json.dumps(prompt_list)}"

            designed_prompts = self.prompt_agent(prompt_query, parse=True)
            designed_prompts = check_and_fix_prompt(designed_prompts, prompt_list)

            plans["eval_results"] = self.sample_and_eval(designed_prompts, self.image_folder, tool_name)
            plan_query = self.format_eval_result(plans, reference_table=reference_table)
            all_chat.append(plans)

            # Safety cap: at most 11 tool rounds per query.
            if n > 9:
                break
            n += 1

        all_chat.append(self.plan_agent.messages)
        save_json(all_chat, self.file_name)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def main():
    """CLI entry: run one T2I evaluation exploration for the given query/model."""
    args = parse_args()
    agent = EvalAgent(sample_model=args.model, save_mode="img")
    agent.explore(args.user_query)
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
# Script entry point.
if __name__ == "__main__":
    main()
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
|
eval_agent/eval_agent_for_vbench.py
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re, time, os
|
| 2 |
+
from tqdm import tqdm
|
| 3 |
+
import json
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
import argparse
|
| 6 |
+
import Levenshtein
|
| 7 |
+
|
| 8 |
+
from base_agent import BaseAgent
|
| 9 |
+
from system_prompts import sys_prompts
|
| 10 |
+
from tools import ToolCalling
|
| 11 |
+
from process import *
|
| 12 |
+
import pandas as pd
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def parse_args():
    """Parse CLI arguments for the VBench evaluation agent."""
    parser = argparse.ArgumentParser(
        description='Eval-Agent-VBench',
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument("--user_query", type=str, required=True, help="user query")
    parser.add_argument("--model", type=str, default="latte1", help="target model")
    return parser.parse_args()
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def most_similar_string(prompt, string_list):
    """Return the prompt-entry (dict) whose "Prompt" field is edit-distance closest to *prompt*.

    Ties resolve to the earliest entry, matching min()'s first-wins behavior.
    """
    return min(string_list, key=lambda entry: Levenshtein.distance(prompt, entry["Prompt"]))
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def check_and_fix_prompt(chosed_prompts, prompt_list):
    """Replace each designed prompt with its closest benchmark entry.

    The agent's "Thought" is carried over onto the matched entry (which is
    mutated in place) and the result is keyed by the original step names.
    """
    fixed = {}
    for step, entry in chosed_prompts.items():
        match = most_similar_string(entry["Prompt"], prompt_list)
        match["Thought"] = entry["Thought"]
        fixed[step] = match
    return fixed
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def format_dimension_as_string(df, dimension_name):
    """Render one dimension's score thresholds as a single summary line."""
    row = df.loc[df['Dimension'] == dimension_name]
    if row.empty:
        return f"No data found for dimension: {dimension_name}"

    def value_of(col):
        return row[col].values[0]

    levels = ["Very High", "High", "Moderate", "Low", "Very Low"]
    graded = ", ".join(f"{level} -> {value_of(level)}" for level in levels)
    return f"{value_of('Dimension')}: {graded}"
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class EvalAgent:
    """Agentic evaluation loop for VBench text-to-video evaluation.

    Mirrors the T2I-CompBench agent: a planner picks a sub-aspect/tool, a
    prompt agent selects matching VBench prompts, videos are sampled and
    scored, and the results are fed back until the planner finishes.
    """

    def __init__(self, sample_model="latte1", save_mode="video", refer_file="vbench_dimension_scores.tsv"):
        # Fix: this line was commented out, but sample_and_eval() uses
        # self.tools — without it every run died with AttributeError.
        self.tools = ToolCalling(sample_model=sample_model, save_mode=save_mode)
        self.sample_model = sample_model
        self.user_query = ""
        self.tsv_file_path = refer_file

    def init_agent(self):
        """Create fresh prompt/plan agents (called once per explore run)."""
        self.prompt_agent = BaseAgent(system_prompt=sys_prompts["vbench-prompt-sys"], use_history=False, temp=0.7)
        self.plan_agent = BaseAgent(system_prompt=sys_prompts["vbench-plan-sys"], use_history=True, temp=0.7)

    def search_auxiliary(self, designed_prompts, prompt):
        """Return the auxiliary_info attached to *prompt*; raise if absent.

        Fix: previously `raise "..."` raised a bare string, which is itself a
        TypeError — now raises a proper LookupError with the same message.
        """
        for _, value in designed_prompts.items():
            if value['Prompt'] == prompt:
                return value["auxiliary_info"]
        raise LookupError("Didn't find auxiliary info, please check your json.")

    def sample_and_eval(self, designed_prompts, save_path, tool_name):
        """Sample videos for the designed prompts and score them with *tool_name*."""
        prompts = [item["Prompt"] for _, item in designed_prompts.items()]
        video_pairs = self.tools.sample(prompts, save_path)
        # Some VBench dimensions need per-prompt auxiliary info for scoring.
        if 'auxiliary_info' in designed_prompts["Step 1"]:
            for item in video_pairs:
                item["auxiliary_info"] = self.search_auxiliary(designed_prompts, item["prompt"])

        eval_results = self.tools.eval(tool_name, video_pairs)
        return eval_results

    def reference_prompt(self, search_dim):
        """Collect VBench benchmark prompts belonging to *search_dim*."""
        file_path = "./eval_tools/vbench/VBench_full_info.json"
        data = json.load(open(file_path, "r"))

        results = []
        for item in data:
            if search_dim in item["dimension"]:
                item.pop("dimension")
                item["Prompt"] = item.pop("prompt_en")
                if 'auxiliary_info' in item and search_dim in item['auxiliary_info']:
                    item["auxiliary_info"] = list(item["auxiliary_info"][search_dim].values())[0]
                results.append(item)

        return results

    def format_eval_result(self, results, reference_table):
        """Format one round of eval results as the observation fed back to the planner."""
        question = results["Sub-aspect"]
        tool_name = results["Tool"]
        average_score = results["eval_results"]["score"][0]
        video_results = results["eval_results"]["score"][1]

        output = f"Sub-aspect: {question}\n"
        output += f"The score categorization table for the numerical results evaluated by the '{tool_name}' is as follows:\n{reference_table}\n\n"
        output += f"Observation: The evaluation results using '{tool_name}' are summarized below.\n"
        output += f"Average Score: {average_score:.4f}\n"
        output += "Detailed Results:\n"

        for i, video in enumerate(video_results, 1):
            output += f"\t{i}. Prompt: {video['prompt']}\n"
            output += f"\tScore: {video['video_results']:.4f}\n"

        return output

    def update_info(self):
        """Create the per-run results folder and derive output paths."""
        folder_name = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') + "-" + self.user_query.replace(" ", "_")
        self.save_path = f"./eval_vbench_results/{self.sample_model}/{folder_name}"
        os.makedirs(self.save_path, exist_ok=True)

        self.video_folder = os.path.join(self.save_path, "videos")
        self.file_name = os.path.join(self.save_path, "eval_results.json")

    def explore(self, query, all_chat=None):
        """Run the plan → prompt → sample/eval loop for *query* (max 11 rounds).

        Fixes: removed a leftover debugger `breakpoint()` that stalled every
        loop iteration, and replaced the mutable-default `all_chat=[]` which
        leaked chat history between calls.
        """
        if all_chat is None:
            all_chat = []

        self.user_query = query
        self.update_info()
        self.init_agent()
        df = pd.read_csv(self.tsv_file_path, sep='\t')

        plan_query = query
        all_chat.append(plan_query)

        n = 0
        while True:
            plans = self.plan_agent(plan_query, parse=True)
            # The planner signals completion by emitting an "Analysis" field.
            if plans.get("Analysis"):
                all_chat.append(plans)
                print("Finish!")
                break

            tool_name = plans["Tool"].lower().strip().replace(" ", "_")
            reference_table = format_dimension_as_string(df, plans["Tool"])

            prompt_query = json.dumps(plans)
            prompt_list = self.reference_prompt(tool_name)
            prompt_query = f"Context:\n{prompt_query}\n\nPrompt list:\n{json.dumps(prompt_list)}"

            designed_prompts = self.prompt_agent(prompt_query, parse=True)
            designed_prompts = check_and_fix_prompt(designed_prompts, prompt_list)

            plans["eval_results"] = self.sample_and_eval(designed_prompts, self.video_folder, tool_name)
            plan_query = self.format_eval_result(plans, reference_table=reference_table)

            all_chat.append(plans)

            # Safety cap: at most 11 tool rounds per query.
            if n > 9:
                break
            n += 1

        all_chat.append(self.plan_agent.messages)
        save_json(all_chat, self.file_name)
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def main():
    """CLI entry: evaluate the chosen video model on the given user query."""
    args = parse_args()
    agent = EvalAgent(sample_model=args.model, save_mode="video")
    agent.explore(args.user_query)
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
# Script entry point.
if __name__ == "__main__":
    main()
|
eval_agent/eval_agent_for_vbench_open.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re, time, os
|
| 2 |
+
from tqdm import tqdm
|
| 3 |
+
import json
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
import argparse
|
| 6 |
+
import Levenshtein
|
| 7 |
+
|
| 8 |
+
from base_agent import BaseAgent_SFT, BaseAgent_Open
|
| 9 |
+
from system_prompts import sys_prompts
|
| 10 |
+
from tools import ToolCalling
|
| 11 |
+
from vbench_leaderboard import VBenchLeaderboard
|
| 12 |
+
import pandas as pd
|
| 13 |
+
from process import *
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def parse_args():
    """Parse CLI arguments for the open-model VBench evaluation agent."""
    parser = argparse.ArgumentParser(
        description='Eval-Agent-VBench',
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument("--user_query", type=str, required=True, help="user query")
    parser.add_argument("--model", type=str, default="latte1", help="target model")
    parser.add_argument("--recommend", action="store_true", help="recommend model")
    return parser.parse_args()
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class EvalAgent:
|
| 43 |
+
    def __init__(self, sample_model="latte1", save_mode="video", refer_file="vbench_dimension_scores.tsv", recommend=False):
        """Set up sampling/eval tools and run configuration.

        sample_model: target video model to evaluate.
        save_mode: artifact type produced by sampling ("video" here).
        refer_file: TSV with per-dimension score-category thresholds.
        recommend: when True, also emit a leaderboard-based model recommendation.
        """
        self.tools = ToolCalling(sample_model=sample_model, save_mode=save_mode)
        self.sample_model = sample_model
        self.user_query = ""
        self.tsv_file_path = refer_file
        self.recommend = recommend
|
| 49 |
+
|
| 50 |
+
    def init_agent(self):
        """Create the SFT-based planner and prompt agents.

        The prompt agent talks to a hard-coded local endpoint — presumably an
        OpenAI-compatible server started separately; TODO confirm and make
        configurable.
        """
        self.eval_agent = BaseAgent_SFT(system_prompt=sys_prompts["eval-agent-vbench-training-sys"], use_history=True, temp=0.5)
        # self.prompt_agent = BaseAgent_Open(system_prompt=sys_prompts["vbench-prompt-sys"], use_history=True, temp=0.5)
        self.prompt_agent = BaseAgent_SFT(system_prompt=sys_prompts["vbench-prompt-sys-open"], use_history=True, temp=0.5, model_name_or_path="http://0.0.0.0:12334/v1/chat/completions")
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def recommend_model(self, query):
|
| 57 |
+
leaderboard = VBenchLeaderboard()
|
| 58 |
+
recommendations = leaderboard.recommend_model(query, top_k=3)
|
| 59 |
+
report = leaderboard.generate_recommendation_report(query, recommendations)
|
| 60 |
+
return report
|
| 61 |
+
|
| 62 |
+
def search_auxiliary(self, designed_prompts, prompt):
|
| 63 |
+
for _, value in designed_prompts.items():
|
| 64 |
+
if value['prompt'] == prompt:
|
| 65 |
+
return value["auxiliary_info"]
|
| 66 |
+
raise "Didn't find auxiliary info, please check your json."
|
| 67 |
+
|
| 68 |
+
def sample_and_eval(self, designed_prompts, save_path, tool_name):
|
| 69 |
+
try:
|
| 70 |
+
prompts = [item["prompt"] for _, item in designed_prompts.items()]
|
| 71 |
+
except:
|
| 72 |
+
designed_prompts = parse_json(designed_prompts)
|
| 73 |
+
if isinstance(designed_prompts, list):
|
| 74 |
+
prompts = [item["prompt"] for item in designed_prompts]
|
| 75 |
+
else:
|
| 76 |
+
prompts = [item["prompt"] for _, item in designed_prompts.items()]
|
| 77 |
+
|
| 78 |
+
video_pairs = self.tools.sample(prompts, save_path)
|
| 79 |
+
if 'auxiliary_info' in designed_prompts["Step 1"]:
|
| 80 |
+
for item in video_pairs:
|
| 81 |
+
item["auxiliary_info"] = self.search_auxiliary(designed_prompts, item["prompt"])
|
| 82 |
+
|
| 83 |
+
eval_results = self.tools.eval(tool_name, video_pairs)
|
| 84 |
+
return eval_results
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def reference_prompt(self, search_dim):
|
| 88 |
+
file_path = "./eval_tools/vbench/VBench_full_info.json"
|
| 89 |
+
data = json.load(open(file_path, "r"))
|
| 90 |
+
|
| 91 |
+
results = []
|
| 92 |
+
for item in data:
|
| 93 |
+
if search_dim in item["dimension"]:
|
| 94 |
+
item.pop("dimension")
|
| 95 |
+
item["Prompt"] = item.pop("prompt_en")
|
| 96 |
+
if 'auxiliary_info' in item and search_dim in item['auxiliary_info']:
|
| 97 |
+
item["auxiliary_info"] = list(item["auxiliary_info"][search_dim].values())[0]
|
| 98 |
+
results.append(item)
|
| 99 |
+
|
| 100 |
+
return results
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
# def format_eval_result(self, results, reference_table):
|
| 105 |
+
# question = results["Sub-aspect"]
|
| 106 |
+
# tool_name = results["Tool"]
|
| 107 |
+
# average_score = results["eval_results"]["score"][0]
|
| 108 |
+
# video_results = results["eval_results"]["score"][1]
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
# output = f"Sub-aspect: {question}\n"
|
| 112 |
+
# output += f"The score categorization table for the numerical results evaluated by the '{tool_name}' is as follows:\n{reference_table}\n\n"
|
| 113 |
+
# output += f"Observation: The evaluation results using '{tool_name}' are summarized below.\n"
|
| 114 |
+
# output += f"Average Score: {average_score:.4f}\n"
|
| 115 |
+
# output += "Detailed Results:\n"
|
| 116 |
+
|
| 117 |
+
# for i, video in enumerate(video_results, 1):
|
| 118 |
+
# prompt = video["prompt"]
|
| 119 |
+
# score = video["video_results"]
|
| 120 |
+
# output += f"\t{i}. Prompt: {prompt}\n"
|
| 121 |
+
# output += f"\tScore: {score:.4f}\n"
|
| 122 |
+
|
| 123 |
+
# return output
|
| 124 |
+
def format_eval_results(self, results, reference_table):
|
| 125 |
+
tool_name = results["tool"]
|
| 126 |
+
average_score = results["eval_results"]["score"][0]
|
| 127 |
+
video_results = results["eval_results"]["score"][1]
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
# More concise and structured format for SFT
|
| 131 |
+
output = f"Scoring Reference Table of '{tool_name}': {reference_table}\n\n"
|
| 132 |
+
output += f"Results:\n"
|
| 133 |
+
output += f"- Overall score: {average_score:.4f}\n"
|
| 134 |
+
output += f"- Per-prompt scores:\n"
|
| 135 |
+
|
| 136 |
+
for video in video_results:
|
| 137 |
+
prompt = video["prompt"]
|
| 138 |
+
score = video["video_results"]
|
| 139 |
+
output += f" • \"{prompt}\": {score:.4f}\n"
|
| 140 |
+
|
| 141 |
+
return output
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def update_info(self):
    """Refresh the per-run output locations for the current query.

    The run folder name comes from the FOLDER_NAME environment variable when
    set (used to group a model's runs into rounds); otherwise it is derived
    from the current timestamp plus the sanitized user query.

    Side effects: creates the save directory and sets self.save_path,
    self.video_folder, and self.file_name.
    """
    # Idiom fix: os.environ.get replaces the membership-test-plus-subscript
    # pattern (single lookup, same semantics).
    default_name = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') + "-" + self.user_query.replace(" ", "_")
    folder_name = os.environ.get("FOLDER_NAME", default_name)

    self.save_path = f"./eval_vbench_results/{self.sample_model}/{folder_name}"
    os.makedirs(self.save_path, exist_ok=True)

    self.video_folder = os.path.join(self.save_path, "videos")
    # Dropped the pointless f-string around the constant filename.
    self.file_name = os.path.join(self.save_path, "eval_results.json")
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def explore(self, query, all_chat=None):
    """Run the iterative evaluate-and-refine loop for one user query.

    Repeatedly asks the planning agent for an evaluation plan, validates the
    chosen tool, designs prompts, samples and evaluates videos, then feeds
    the formatted results back as the next plan query — until the agent
    emits a </summary> block or 10 rounds have elapsed. All intermediate
    plans plus the agent's message history are appended to `all_chat` and
    saved to self.file_name.

    Args:
        query: the user's natural-language evaluation request.
        all_chat: optional pre-existing chat list to append to; a fresh
            list is created when omitted.
    """
    # Fix: the original used the mutable default `all_chat=[]`, which is
    # shared across calls and leaks chat history between queries.
    if all_chat is None:
        all_chat = []

    self.user_query = query
    self.update_info()
    self.init_agent()
    df = pd.read_csv(self.tsv_file_path, sep='\t')

    plan_query = query
    all_chat.append(plan_query)

    n = 0  # round counter; hard cap of 10 extra rounds below

    start_time = time.time()
    while True:
        plans_str = self.eval_agent(plan_query)
        plans = format_plans(plans_str)

        # A </summary> tag signals the agent considers the evaluation done.
        if '</summary>' in plans_str:
            print(f"Finish! Time: {time.time() - start_time:.2f}s")
            plans["eval_time"] = time.time() - start_time

            if self.recommend:
                print("Generating recommendation report...")
                report = self.recommend_model(query)
                plans["recommendation_report"] = report
                print(f"\nQuery: {query}")
                print("-" * 40)
                print(report)
                print("\n" + "="*80)

            all_chat.append(plans)
            break

        # Validate the proposed tool, re-asking the planner up to 3 times.
        for _ in range(3):
            try:
                tool = plans.get('tool', None)
                if tool and tool_existence(tool):
                    plans["tool"] = tool_existence(tool)
                    break
                else:
                    # If tool does not exist, regenerate plan_str
                    plans_str = self.eval_agent(plan_query)
                    plans = format_plans(plans_str)
            except Exception as e:
                # Safe error message that doesn't assume 'tool' key exists
                tool_name = plans.get("tool", "UNKNOWN")
                print(f"❌ Tool '{tool_name}' not found or not valid.")
                print(f"Generated plan: {plans_str[:200]}...")
                print(f"Parsed result: {plans}")
                print(f"Error: {e}")
                continue  # Try again

        reference_table = format_dimension_as_string(df, plans["tool"])

        # Design evaluation prompts from the tool's reference prompt list.
        prompt_list = self.reference_prompt(plans["tool"])
        prompt_query = f"## Context:\n{json.dumps(plans)}\n\n ## Prompt list:\n{json.dumps(prompt_list)}"

        designed_prompts = self.prompt_agent(prompt_query)

        plans["eval_results"] = self.sample_and_eval(designed_prompts, self.video_folder, plans["tool"])

        plan_query = self.format_eval_results(plans, reference_table=reference_table) # NEW plan query, simpler

        all_chat.append(plans)

        if n > 9:
            break
        n += 1

    all_chat.append(self.eval_agent.messages)
    save_json(all_chat, self.file_name)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def main():
    """CLI entry point: build an EvalAgent and run one exploration round."""
    cli = parse_args()
    query = cli.user_query
    agent = EvalAgent(
        sample_model=cli.model,
        save_mode="video",
        recommend=cli.recommend,
    )
    agent.explore(query)
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
if __name__ == "__main__":
|
| 240 |
+
main()
|
eval_agent/eval_tools/__init__.py
ADDED
|
File without changes
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/BLIP/__init__.py
ADDED
|
File without changes
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/BLIP/train_vqa_func.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
* Copyright (c) 2022, salesforce.com, inc.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
* SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
| 6 |
+
* By Junnan Li
|
| 7 |
+
'''
|
| 8 |
+
import argparse
|
| 9 |
+
import os
|
| 10 |
+
# import ruamel_yaml as yaml
|
| 11 |
+
try:
|
| 12 |
+
import ruamel_yaml as yaml
|
| 13 |
+
except ModuleNotFoundError:
|
| 14 |
+
import ruamel.yaml as yaml
|
| 15 |
+
import numpy as np
|
| 16 |
+
import random
|
| 17 |
+
import time
|
| 18 |
+
import datetime
|
| 19 |
+
import json
|
| 20 |
+
from pathlib import Path
|
| 21 |
+
|
| 22 |
+
import torch
|
| 23 |
+
import torch.nn as nn
|
| 24 |
+
import torch.nn.functional as F
|
| 25 |
+
from torch.utils.data import DataLoader
|
| 26 |
+
import torch.backends.cudnn as cudnn
|
| 27 |
+
import torch.distributed as dist
|
| 28 |
+
import sys
|
| 29 |
+
sys.path.append("..")
|
| 30 |
+
# sys.path.append("/mnt/petrelfs/zhangfan.p/zhangfan/evaluate-agent/agent/eval_tools/t2i_comp/BLIPvqa_eval")
|
| 31 |
+
from models.blip_vqa import blip_vqa
|
| 32 |
+
import utils
|
| 33 |
+
from utils import cosine_lr_schedule
|
| 34 |
+
from data import create_dataset, create_sampler, create_loader
|
| 35 |
+
from data.vqa_dataset import vqa_collate_fn
|
| 36 |
+
from data.utils import save_result
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def train(model, data_loader, optimizer, epoch, device):
    """Run one VQA training epoch; return averaged metrics as strings."""
    model.train()

    logger = utils.MetricLogger(delimiter="  ")
    logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    logger.add_meter('loss', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))

    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50

    for batch in logger.log_every(data_loader, print_freq, header):
        image, question, answer, weights, n = batch
        image = image.to(device, non_blocking=True)
        weights = weights.to(device, non_blocking=True)

        loss = model(image, question, answer, train=True, n=n, weights=weights)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        logger.update(loss=loss.item())
        logger.update(lr=optimizer.param_groups[0]["lr"])

    # Aggregate meter statistics across distributed workers before reporting.
    logger.synchronize_between_processes()
    print("Averaged stats:", logger.global_avg())
    return {k: "{:.3f}".format(m.global_avg) for k, m in logger.meters.items()}
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@torch.no_grad()
def evaluation(model, data_loader, device, config) :
    """Run VQA inference over `data_loader` and collect per-question answers.

    Supports three modes selected by config['inference']:
      - 'generate': free-form answer generation.
      - 'rank': score a fixed candidate answer list and pick the best.
      - 'vqa_prob': return a probability (presumably P("yes") — see the
        inline comment below; confirm against the model implementation).

    Returns:
        A list of {"question_id": int, "answer": ...} dicts; the answer type
        depends on the inference mode (str for generate/rank, numeric for
        vqa_prob).
    """
    # test
    model.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Generate VQA test result:'
    print_freq = 50

    result = []

    if config['inference']=='rank':
        # Pre-tokenize the closed answer set once; reused for every batch.
        answer_list = data_loader.dataset.answer_list
        answer_candidates = model.tokenizer(answer_list, padding='longest', return_tensors='pt').to(device)
        # Replace the first token with BOS so candidates decode as answers.
        answer_candidates.input_ids[:,0] = model.tokenizer.bos_token_id

    for n, (image, question, question_id) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image = image.to(device,non_blocking=True)

        if config['inference']=='generate':
            answers = model(image, question, train=False, inference='generate')

            for answer, ques_id in zip(answers, question_id):
                ques_id = int(ques_id.item())
                result.append({"question_id":ques_id, "answer":answer})

        elif config['inference']=='rank':
            # Model returns an index into answer_list for each question.
            answer_ids = model(image, question, answer_candidates, train=False, inference='rank', k_test=config['k_test'])

            for ques_id, answer_id in zip(question_id, answer_ids):
                result.append({"question_id":int(ques_id.item()), "answer":answer_list[answer_id]})

        elif config['inference'] == 'vqa_prob': #pred yes probability
            probs = model(image, question, train=False, inference="vqa_prob")
            for prob, ques_id in zip(probs, question_id):
                result.append({"question_id":int(ques_id.item()), "answer":prob})


    return result
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def VQA(evaluate, device, seed, distributed, config, result_dir, output_dir):
    """Train and/or evaluate a BLIP-VQA model.

    Args:
        evaluate: when True, skip the training loop entirely and only run
            inference on the test split.
        device: device string or torch.device (e.g. 'cuda').
        seed: base random seed (offset by the process rank below).
        distributed: whether to wrap the model in DistributedDataParallel.
        config: dict of dataset / model / optimizer hyperparameters.
        result_dir: directory for the per-question result JSON.
        output_dir: directory for checkpoints and the training log.

    Returns:
        The answer list produced by `evaluation`.
    """
    # utils.init_distributed_mode(args)

    device = torch.device(device)

    # fix the seed for reproducibility
    seed = seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    #### Dataset ####
    print("Creating vqa datasets")
    datasets = create_dataset('vqa', config)

    if distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        # Shuffle only the train split; keep the test split ordered.
        samplers = create_sampler(datasets, [True, False], num_tasks, global_rank)
    else:
        samplers = [None, None]

    train_loader, test_loader = create_loader(datasets,samplers,
                                              batch_size=[config['batch_size_train'],config['batch_size_test']],
                                              # num_workers=[4,4],is_trains=[True, False],
                                              num_workers=[1,1],is_trains=[True, False],
                                              collate_fns=[vqa_collate_fn,None])
    #### Model ####
    print("Creating model")
    model = blip_vqa(pretrained=config['pretrained'], image_size=config['image_size'],
                       vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'])

    model = model.to(device)

    # Keep an unwrapped handle so state_dict() / evaluation skip DDP prefixes.
    model_without_ddp = model
    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(model)
        model_without_ddp = model.module

    optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])

    best = 0
    best_epoch = 0

    print("Start training")
    start_time = time.time()
    for epoch in range(0, config['max_epoch']):
        if not evaluate:
            if distributed:
                train_loader.sampler.set_epoch(epoch)

            cosine_lr_schedule(optimizer, epoch, config['max_epoch'], config['init_lr'], config['min_lr'])

            train_stats = train(model, train_loader, optimizer, epoch, device)

        else:
            # Evaluation-only mode: leave the loop before any epoch runs.
            break

        # Rank 0 appends the epoch stats to log.txt and checkpoints the model.
        if utils.is_main_process():
            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                         'epoch': epoch,
                         }
            with open(os.path.join(output_dir, "log.txt"),"a") as f:
                f.write(json.dumps(log_stats) + "\n")

            save_obj = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'config': config,
                'epoch': epoch,
            }
            torch.save(save_obj, os.path.join(output_dir, 'checkpoint_%02d.pth'%epoch))

        # NOTE(review): this barrier runs even when distributed=False on the
        # training path; it raises unless a process group is initialized —
        # confirm non-distributed callers only use evaluate=True.
        dist.barrier()

    vqa_result = evaluation(model_without_ddp, test_loader, device, config)
    result_file = save_result(vqa_result, result_dir, 'vqa_result')

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
    return vqa_result
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def VQA_main(ann_root,output_dir,inference='vqa_prob'): #annotation path, output path
    """Run BLIP-VQA evaluation for one annotation folder.

    Args:
        ann_root: directory containing the vqa_test.json annotation file.
        output_dir: directory receiving the config dump and results.
        inference: 'vqa_prob' (yes-probability), 'generate', or 'rank'.

    Returns:
        The list of {"question_id": ..., "answer": ...} dicts from `VQA`.
    """
    # Fix: resolve the config relative to this file instead of a hard-coded,
    # machine-specific absolute path (configs/ sits beside the BLIP package —
    # verify layout if the package is relocated).
    config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               '..', 'configs', 'vqa.yaml')
    evaluate = True
    device = 'cuda'
    seed = 42
    distributed = False

    # Fix: the original left both YAML file handles unclosed.
    with open(config_path, 'r') as f:
        config = yaml.load(f, Loader=yaml.Loader)

    config['ann_root']=ann_root
    config['inference'] = inference

    result_dir = os.path.join(output_dir, 'result')

    Path(output_dir).mkdir(parents=True, exist_ok=True)
    Path(result_dir).mkdir(parents=True, exist_ok=True)

    # Persist the effective config next to the results for reproducibility.
    with open(os.path.join(output_dir, 'config.yaml'), 'w') as f:
        yaml.dump(config, f)

    result = VQA(evaluate, device, seed, distributed, config, result_dir, output_dir)
    return result #list("question_id":0,"answer":yes)
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/BLIP/utils.py
ADDED
|
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
    """Set the LR on a half-cosine curve from init_lr (epoch 0) to min_lr."""
    cosine_factor = 0.5 * (1. + math.cos(math.pi * epoch / max_epoch))
    new_lr = min_lr + (init_lr - min_lr) * cosine_factor
    for group in optimizer.param_groups:
        group['lr'] = new_lr
|
| 7 |
+
|
| 8 |
+
def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
    """Linearly ramp the LR from init_lr toward max_lr, capped at max_lr."""
    ramped = init_lr + (max_lr - init_lr) * step / max_step
    new_lr = min(max_lr, ramped)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
|
| 13 |
+
|
| 14 |
+
def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate):
    """Geometrically decay the LR each epoch, floored at min_lr."""
    decayed = init_lr * (decay_rate ** epoch)
    new_lr = decayed if decayed > min_lr else min_lr
    for group in optimizer.param_groups:
        group['lr'] = new_lr
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
import io
|
| 22 |
+
import os
|
| 23 |
+
import time
|
| 24 |
+
from collections import defaultdict, deque
|
| 25 |
+
import datetime
|
| 26 |
+
|
| 27 |
+
import torch
|
| 28 |
+
import torch.distributed as dist
|
| 29 |
+
|
| 30 |
+
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        # Default display: windowed median followed by the global average.
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0

    def update(self, value, n=1):
        """Record one value, weighted by n, in both window and totals."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        stats = stats.tolist()
        self.count = int(stats[0])
        self.total = stats[1]

    @property
    def median(self):
        # Windowed median via a temporary tensor.
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        # Windowed mean via a temporary float tensor.
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value,
        )
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class MetricLogger(object):
    """Collect named SmoothedValue meters and periodically print progress.

    Typical use: `for batch in logger.log_every(loader, freq, header): ...`
    for timing/ETA printouts, plus `logger.update(loss=..., lr=...)` to feed
    meters inside the loop.
    """
    def __init__(self, delimiter="\t"):
        # Unknown meter names get a default SmoothedValue on first update.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Feed scalar values (or 0-d tensors) into the named meters."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Expose meters as attributes (e.g. logger.loss). Only reached when
        # normal attribute lookup fails.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def global_avg(self):
        """Render every meter's global average as one delimited string."""
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {:.4f}".format(name, meter.global_avg)
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        # Reduce counts/totals across ranks (windowed deques stay local).
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        """Register a meter with a custom window/format ahead of time."""
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, printing timing/ETA every print_freq steps.

        Requires `iterable` to support len(); tracks per-iteration time and
        data-loading time separately.
        """
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the step counter to the width of the final step number.
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # Time since last yield resumed = time spent fetching data.
            data_time.update(time.time() - end)
            yield obj
            # Time since the fetch = full iteration (fetch + caller body).
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class AttrDict(dict):
    """Dict whose keys are also accessible as attributes (d.key == d['key'])."""
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # Alias the instance __dict__ to the mapping itself so attribute
        # access and item access share the same storage.
        self.__dict__ = self
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def compute_acc(logits, label, reduction='mean'):
    """Compute classification accuracy from logits.

    Args:
        logits: (N, C) score tensor; argmax over dim 1 gives predictions.
        label: (N,) ground-truth class indices.
        reduction: 'none' for a detached per-sample 0/1 float tensor,
            'mean' for a Python float accuracy.

    Raises:
        ValueError: for an unsupported reduction (the original silently
            returned None here).
    """
    ret = (torch.argmax(logits, dim=1) == label).float()
    if reduction == 'none':
        return ret.detach()
    elif reduction == 'mean':
        return ret.mean().item()
    raise ValueError("Unsupported reduction: {!r}".format(reduction))
|
| 194 |
+
|
| 195 |
+
def compute_n_params(model, return_str=True):
    """Count the total number of parameter elements in a model.

    Args:
        model: any object exposing torch's `parameters()` iterator.
        return_str: when True, return a human-readable '{x}M'/'{x}K' string;
            otherwise return the raw integer count.
    """
    # Idiom fix: Tensor.numel() replaces the hand-rolled shape product loop.
    tot = sum(p.numel() for p in model.parameters())
    if not return_str:
        return tot
    if tot >= 1e6:
        return '{:.1f}M'.format(tot / 1e6)
    return '{:.1f}K'.format(tot / 1e3)
|
| 209 |
+
|
| 210 |
+
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    # Keep a handle on the real print so the wrapper can still emit output.
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # Non-master ranks stay silent unless the caller passes force=True.
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    # Monkey-patch the global builtin: every subsequent print() anywhere in
    # the process goes through the gate above.
    __builtin__.print = print
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def is_dist_avail_and_initialized():
    """True only when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
def get_world_size():
    """Number of participating processes (1 outside distributed runs)."""
    if is_dist_avail_and_initialized():
        return dist.get_world_size()
    return 1
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def get_rank():
    """This process's distributed rank (0 outside distributed runs)."""
    if is_dist_avail_and_initialized():
        return dist.get_rank()
    return 0
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def is_main_process():
    """True on rank 0, and in any non-distributed run."""
    rank = get_rank()
    return rank == 0
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def save_on_master(*args, **kwargs):
    """Forward to torch.save, but only on the main (rank-0) process."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def init_distributed_mode(args):
    """Initialize torch.distributed from environment variables.

    Reads RANK/WORLD_SIZE/LOCAL_RANK (torchrun-style) or SLURM_PROCID and
    sets args.rank / args.world_size / args.gpu / args.distributed, then
    initializes an NCCL process group. No-ops when distribution is
    unavailable or the group is already initialized.
    """
    # Guard: skip when dist is unavailable or already initialized.
    if not dist.is_available():
        return
    if dist.is_initialized():
        return

    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
        # NOTE(review): this branch never sets args.world_size, yet it is
        # used below in the print and init_process_group — confirm SLURM
        # launches provide it on args beforehand.
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    # NOTE(review): args.dist_url is assumed to be set by the caller; it is
    # not derived here.
    print('| distributed init (rank {}, word {}): {}'.format(
        args.rank, args.world_size, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    # Synchronize all ranks before silencing print on non-master processes.
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
| 283 |
+
|
| 284 |
+
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/BLIP_vqa.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from tqdm import tqdm, trange
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
import json
|
| 10 |
+
from tqdm.auto import tqdm
|
| 11 |
+
import sys
|
| 12 |
+
import spacy
|
| 13 |
+
|
| 14 |
+
from BLIP.train_vqa_func import VQA_main
|
| 15 |
+
|
| 16 |
+
def Create_annotation_for_BLIP(image_folder, outpath, np_index=None):
    """Build a BLIP-VQA annotation file for one noun-phrase index.

    For each image in `image_folder`, the prompt is recovered from the part
    of the file name before the first '_', parsed with spaCy, and the
    np_index-th noun phrase becomes the VQA question ('' when the prompt has
    fewer phrases). Writes `<outpath>/vqa_test.json`.

    Args:
        image_folder: directory of images named '<prompt>_<idx>.<ext>'.
        outpath: directory receiving vqa_test.json.
        np_index: 0-based noun-phrase index. NOTE(review): the default None
            would raise on the `>` comparison below — callers must pass an
            int.
    """
    nlp = spacy.load("en_core_web_sm")

    annotations = []
    file_names = os.listdir(image_folder)
    # Sort by the numeric suffix before the extension ('prompt_12.png' -> 12).
    file_names.sort(key=lambda x: int(x.split("_")[-1].split('.')[0]))

    cnt = 0

    # output annotation.json
    for file_name in file_names:
        image_dict = {}
        # Fix: os.path.join is robust to a missing trailing separator; the
        # original `image_folder+file_name` silently produced broken paths
        # when the caller omitted the trailing '/'.
        image_dict['image'] = os.path.join(image_folder, file_name)
        image_dict['question_id'] = cnt
        f = file_name.split('_')[0]
        doc = nlp(f)

        noun_phrases = []
        for chunk in doc.noun_chunks:
            if chunk.text not in ['top', 'the side', 'the left', 'the right']: # todo remove some phrases
                noun_phrases.append(chunk.text)
        if(len(noun_phrases)>np_index):
            q_tmp = noun_phrases[np_index]
            image_dict['question']=f'{q_tmp}?'
        else:
            # Prompt has no np_index-th phrase; empty question means "skip".
            image_dict['question'] = ''

        image_dict['dataset']="color"
        cnt+=1

        annotations.append(image_dict)

    print('Number of Processed Images:', len(annotations))

    # Serialize directly into the file (same bytes as dumps + write).
    with open(f'{outpath}/vqa_test.json', 'w') as f:
        json.dump(annotations, f)
|
| 55 |
+
|
| 56 |
+
def parse_args():
    """Parse the command-line options for BLIP-VQA evaluation."""
    parser = argparse.ArgumentParser(description="BLIP vqa evaluation.")
    parser.add_argument(
        "--out_dir",
        type=str,
        default=None,
        required=True,
        help="Path to output BLIP vqa score",
    )
    parser.add_argument(
        "--np_num",
        type=int,
        default=8,
        help="Noun phrase number, can be greater or equal to the actual noun phrase number",
    )
    return parser.parse_args()
|
| 73 |
+
|
| 74 |
+
def main():
    """Compute the BLIP-VQA score for a folder of generated images.

    For each noun-phrase index i, builds an annotation file asking whether
    the i-th noun phrase appears in each image, runs BLIP VQA to get a
    per-image probability, then multiplies the per-phrase probabilities into
    one score per image and averages them into the final BLIP-VQA score,
    written under <out_dir>/annotation_blip/.
    """
    args = parse_args()
    np_index = args.np_num #how many noun phrases

    answer = []
    sample_num = len(os.listdir(os.path.join(args.out_dir,"samples")))
    # reward[k][i] holds the score of phrase i for sample k.
    reward = torch.zeros((sample_num, np_index)).to(device='cuda')

    out_dir = args.out_dir

    order="_blip" #rename file
    for i in tqdm(range(np_index)):
        print(f"start VQA{i+1}/{np_index}!")
        os.makedirs(f"{out_dir}/annotation{i + 1}{order}", exist_ok=True)
        os.makedirs(f"{out_dir}/annotation{i + 1}{order}/VQA/", exist_ok=True)
        Create_annotation_for_BLIP(
            f"{out_dir}/samples/",
            f"{out_dir}/annotation{i + 1}{order}",
            np_index=i,
        )
        answer_tmp = VQA_main(f"{out_dir}/annotation{i + 1}{order}/",
                              f"{out_dir}/annotation{i + 1}{order}/VQA/")
        answer.append(answer_tmp)

        with open(f"{out_dir}/annotation{i + 1}{order}/VQA/result/vqa_result.json", "r") as file:
            r = json.load(file)
        with open(f"{out_dir}/annotation{i + 1}{order}/vqa_test.json", "r") as file:
            r_tmp = json.load(file)
        for k in range(len(r)):
            # Images whose prompt has no i-th noun phrase get a neutral
            # factor of 1 so they don't drag down the product below.
            if(r_tmp[k]['question']!=''):
                reward[k][i] = float(r[k]["answer"])
            else:
                reward[k][i] = 1
        print(f"end VQA{i+1}/{np_index}!")
    # Per-image score = product of all phrase probabilities.
    reward_final = reward[:,0]
    for i in range(1,np_index):
        reward_final *= reward[:,i]

    #output final json
    # NOTE(review): `i` here is the leftover loop variable from above, so
    # this reads the last annotation round's result file as the template —
    # confirm this is intentional (it breaks if np_num == 1 and the second
    # loop never ran with a different i).
    with open(f"{out_dir}/annotation{i + 1}{order}/VQA/result/vqa_result.json", "r") as file:
        r = json.load(file)
    reward_after=0
    for k in range(len(r)):
        r[k]["answer"] = '{:.4f}'.format(reward_final[k].item())
        reward_after+=float(r[k]["answer"])
    os.makedirs(f"{out_dir}/annotation{order}", exist_ok=True)
    with open(f"{out_dir}/annotation{order}/vqa_result.json", "w") as file:
        json.dump(r, file)

    # calculate avg of BLIP-VQA as BLIP-VQA score
    print("BLIP-VQA score:", reward_after/len(r),'!\n')
    with open(f"{out_dir}/annotation{order}/blip_vqa_score.txt", "w") as file:
        file.write("BLIP-VQA score:"+str(reward_after/len(r)))
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
if __name__ == "__main__":
|
| 131 |
+
main()
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/BLIP_vqa_eval_agent.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from tqdm import tqdm, trange
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
import json
|
| 10 |
+
from tqdm.auto import tqdm
|
| 11 |
+
import sys
|
| 12 |
+
import spacy
|
| 13 |
+
|
| 14 |
+
from eval_tools.t2i_comp.BLIPvqa_eval.BLIP.train_vqa_func import VQA_main
|
| 15 |
+
import shutil
|
| 16 |
+
import secrets
|
| 17 |
+
import string
|
| 18 |
+
|
| 19 |
+
def Create_annotation_for_BLIP(image_pairs, outpath, np_index=None):
    """Write a BLIP-VQA annotation file for one noun-phrase index.

    Each prompt is parsed with spaCy and its ``np_index``-th noun phrase
    becomes the VQA question ("<phrase>?"); pairs whose prompt has fewer
    noun phrases get an empty question.  The records are dumped to
    ``<outpath>/vqa_test.json``.
    """
    nlp = spacy.load("en_core_web_sm")

    # Layout words rather than objects/attributes — excluded from questions.
    skipped_phrases = ['top', 'the side', 'the left', 'the right']

    annotations = []
    for question_id, info in enumerate(image_pairs):
        doc = nlp(info["prompt"])
        noun_phrases = [
            chunk.text
            for chunk in doc.noun_chunks
            if chunk.text not in skipped_phrases
        ]
        if len(noun_phrases) > np_index:
            question = f'{noun_phrases[np_index]}?'
        else:
            question = ''
        annotations.append({
            'image': info["content_path"],
            'question_id': question_id,
            'question': question,
            'dataset': "color",
        })

    print('Number of Processed Images:', len(annotations))

    with open(f'{outpath}/vqa_test.json', 'w') as out_file:
        out_file.write(json.dumps(annotations))
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def blip_vqa(out_dir, image_pairs, np_num=8):
    """Compute BLIP-VQA attribute-binding scores for image/prompt pairs.

    For each noun-phrase index i in [0, np_num) an annotation folder
    ``{out_dir}/annotation{i+1}_blip`` is written and scored via VQA_main.
    The per-image reward is the product of its per-noun-phrase VQA answer
    scores (with 1.0 as the neutral factor for absent phrases).

    Returns:
        {"score": [avg_score, results]} where results holds one dict per
        image with 'prompt', 'image_path' and the final 'image_results'.
    """
    np_index = np_num #how many noun phrases

    answer = []
    sample_num = len(image_pairs)
    # reward[k][i]: VQA answer score for noun phrase i of image k.
    # NOTE(review): device='cuda' is hard-coded — fails on CPU-only hosts.
    reward = torch.zeros((sample_num, np_index)).to(device='cuda')


    order="_blip" #rename file
    for i in tqdm(range(np_index)):
        print(f"start VQA{i+1}/{np_index}!")
        os.makedirs(f"{out_dir}/annotation{i + 1}{order}", exist_ok=True)
        os.makedirs(f"{out_dir}/annotation{i + 1}{order}/VQA/", exist_ok=True)
        # Write vqa_test.json asking about the i-th noun phrase of each prompt.
        Create_annotation_for_BLIP(
            image_pairs,
            f"{out_dir}/annotation{i + 1}{order}",
            np_index=i,
        )
        answer_tmp = VQA_main(f"{out_dir}/annotation{i + 1}{order}/",
                              f"{out_dir}/annotation{i + 1}{order}/VQA/")
        answer.append(answer_tmp)

        with open(f"{out_dir}/annotation{i + 1}{order}/VQA/result/vqa_result.json", "r") as file:
            r = json.load(file)
        with open(f"{out_dir}/annotation{i + 1}{order}/vqa_test.json", "r") as file:
            r_tmp = json.load(file)
        for k in range(len(r)):
            if(r_tmp[k]['question']!=''):
                reward[k][i] = float(r[k]["answer"])
            else:
                # Prompt has fewer than i+1 noun phrases: neutral factor.
                reward[k][i] = 1
        print(f"end VQA{i+1}/{np_index}!")
    # Final per-image score = product over all noun-phrase scores.
    reward_final = reward[:,0]
    for i in range(1,np_index):
        reward_final *= reward[:,i]

    #output final json
    # NOTE(review): this deliberately reuses the loop variable i
    # (== np_index-1 here): the LAST annotation folder's result file is
    # reloaded only as a record template, then overwritten with the
    # aggregated scores below.
    with open(f"{out_dir}/annotation{i + 1}{order}/VQA/result/vqa_result.json", "r") as file:
        r = json.load(file)
    reward_after=0
    for k in range(len(r)):
        r[k]["answer"] = '{:.4f}'.format(reward_final[k].item())
        reward_after+=float(r[k]["answer"])

    results = []
    for info_image, info_result in zip(image_pairs, r):
        results.append({'prompt':info_image["prompt"], 'image_path': info_image["content_path"], 'image_results': float(info_result["answer"])})

    avg_score = reward_after/len(r)

    return {
        "score":[avg_score, results]
    }
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def generate_secure_random_number(length):
    """Return a string of ``length`` cryptographically random decimal digits."""
    return ''.join(secrets.choice(string.digits) for _ in range(length))
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def calculate_attribute_binding(image_pairs):
    """Score attribute binding for a batch of (prompt, image) pairs via BLIP-VQA.

    A throwaway working directory — made unique per call by a random digit
    suffix — holds the intermediate annotation/VQA files, and is removed even
    when the evaluation raises.

    Args:
        image_pairs: non-empty list of dicts with "prompt" and "content_path".

    Returns:
        The dict produced by ``blip_vqa``: {"score": [avg_score, results]}.
    """
    # A "/" in the prompt would otherwise create a nested (or invalid) path.
    tag = image_pairs[-1]["prompt"].replace(" ", "_").replace("/", "_")
    out_path = "./folder_temporary_" + tag + generate_secure_random_number(7)
    try:
        return blip_vqa(out_path, image_pairs, np_num=8)
    finally:
        # Always clean up the scratch directory, even on failure.
        shutil.rmtree(out_path, ignore_errors=True)
|
| 126 |
+
|
| 127 |
+
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/configs/bert_config.json
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"BertModel"
|
| 4 |
+
],
|
| 5 |
+
"attention_probs_dropout_prob": 0.1,
|
| 6 |
+
"hidden_act": "gelu",
|
| 7 |
+
"hidden_dropout_prob": 0.1,
|
| 8 |
+
"hidden_size": 768,
|
| 9 |
+
"initializer_range": 0.02,
|
| 10 |
+
"intermediate_size": 3072,
|
| 11 |
+
"layer_norm_eps": 1e-12,
|
| 12 |
+
"max_position_embeddings": 512,
|
| 13 |
+
"model_type": "bert",
|
| 14 |
+
"num_attention_heads": 12,
|
| 15 |
+
"num_hidden_layers": 12,
|
| 16 |
+
"pad_token_id": 0,
|
| 17 |
+
"type_vocab_size": 2,
|
| 18 |
+
"vocab_size": 30522,
|
| 19 |
+
"encoder_width": 768,
|
| 20 |
+
"add_cross_attention": true
|
| 21 |
+
}
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/configs/med_config.json
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"BertModel"
|
| 4 |
+
],
|
| 5 |
+
"attention_probs_dropout_prob": 0.1,
|
| 6 |
+
"hidden_act": "gelu",
|
| 7 |
+
"hidden_dropout_prob": 0.1,
|
| 8 |
+
"hidden_size": 768,
|
| 9 |
+
"initializer_range": 0.02,
|
| 10 |
+
"intermediate_size": 3072,
|
| 11 |
+
"layer_norm_eps": 1e-12,
|
| 12 |
+
"max_position_embeddings": 512,
|
| 13 |
+
"model_type": "bert",
|
| 14 |
+
"num_attention_heads": 12,
|
| 15 |
+
"num_hidden_layers": 12,
|
| 16 |
+
"pad_token_id": 0,
|
| 17 |
+
"type_vocab_size": 2,
|
| 18 |
+
"vocab_size": 30524,
|
| 19 |
+
"encoder_width": 768,
|
| 20 |
+
"add_cross_attention": true
|
| 21 |
+
}
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/configs/vqa.yaml
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
vqa_root: '/export/share/datasets/vision/VQA/Images/mscoco/' #followed by train2014/
|
| 2 |
+
vg_root: '/export/share/datasets/vision/visual-genome/' #followed by image/
|
| 3 |
+
train_files: ['vqa_train','vqa_val','vg_qa']
|
| 4 |
+
ann_root: ''
|
| 5 |
+
|
| 6 |
+
# set pretrained as a file path or an url
|
| 7 |
+
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
|
| 8 |
+
|
| 9 |
+
# size of vit model; base or large
|
| 10 |
+
vit: 'base'
|
| 11 |
+
batch_size_train: 16
|
| 12 |
+
batch_size_test: 32
|
| 13 |
+
vit_grad_ckpt: False
|
| 14 |
+
vit_ckpt_layer: 0
|
| 15 |
+
init_lr: 2e-5
|
| 16 |
+
|
| 17 |
+
image_size: 480
|
| 18 |
+
|
| 19 |
+
inference: 'vqa_prob'
|
| 20 |
+
|
| 21 |
+
# optimizer
|
| 22 |
+
weight_decay: 0.05
|
| 23 |
+
min_lr: 0
|
| 24 |
+
max_epoch: 10
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/models/__init__.py
ADDED
|
File without changes
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/models/blip.py
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
* Copyright (c) 2022, salesforce.com, inc.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
* SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
| 6 |
+
* By Junnan Li
|
| 7 |
+
'''
|
| 8 |
+
import warnings
|
| 9 |
+
warnings.filterwarnings("ignore")
|
| 10 |
+
|
| 11 |
+
from models.vit import VisionTransformer, interpolate_pos_embed
|
| 12 |
+
from models.med import BertConfig, BertModel, BertLMHeadModel
|
| 13 |
+
from transformers import BertTokenizer
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
from torch import nn
|
| 17 |
+
import torch.nn.functional as F
|
| 18 |
+
|
| 19 |
+
import os
|
| 20 |
+
from urllib.parse import urlparse
|
| 21 |
+
from timm.models.hub import download_cached_file
|
| 22 |
+
|
| 23 |
+
class BLIP_Base(nn.Module):
    """BLIP backbone exposing image-only, text-only, or fused features."""

    def __init__(self,
                 med_config = 'configs/med_config.json',
                 image_size = 224,
                 vit = 'base',
                 vit_grad_ckpt = False,
                 vit_ckpt_layer = 0,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        # Cross-attention width must match the ViT output width.
        med_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)


    def forward(self, image, caption, mode):
        """Return features for the (image, caption) pair.

        Args:
            image: batched image tensor (its device also hosts the text batch).
            caption: caption input accepted by the tokenizer.
            mode: 'image', 'text', or 'multimodal' — selects which features
                to compute and return (last hidden states for text modes).
        """
        assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal"
        text = self.tokenizer(caption, return_tensors="pt").to(image.device)

        if mode=='image':
            # return image features
            image_embeds = self.visual_encoder(image)
            return image_embeds

        elif mode=='text':
            # return text features
            text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask,
                                            return_dict = True, mode = 'text')
            return text_output.last_hidden_state

        elif mode=='multimodal':
            # return multimodel features
            image_embeds = self.visual_encoder(image)
            image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)

            # Swap the leading token for [ENC] so the text encoder fuses the
            # caption with the image via cross-attention.
            text.input_ids[:,0] = self.tokenizer.enc_token_id
            output = self.text_encoder(text.input_ids,
                                       attention_mask = text.attention_mask,
                                       encoder_hidden_states = image_embeds,
                                       encoder_attention_mask = image_atts,
                                       return_dict = True,
                                      )
            return output.last_hidden_state
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class BLIP_Decoder(nn.Module):
    """BLIP captioning model: ViT image encoder + BERT LM decoder head."""

    def __init__(self,
                 med_config = 'configs/med_config.json',
                 image_size = 384,
                 vit = 'base',
                 vit_grad_ckpt = False,
                 vit_ckpt_layer = 0,
                 prompt = 'a picture of ',
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        med_config.encoder_width = vision_width
        self.text_decoder = BertLMHeadModel(config=med_config)

        self.prompt = prompt
        # Token count of the fixed prompt prefix (minus the trailing [SEP]);
        # these positions are masked out of the LM loss in forward().
        self.prompt_length = len(self.tokenizer(self.prompt).input_ids)-1


    def forward(self, image, caption):
        """Return the language-modeling loss for (image, caption) batches."""
        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)

        text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device)

        # Decoder sequences start with the [DEC] bos token instead of [CLS].
        text.input_ids[:,0] = self.tokenizer.bos_token_id

        # -100 positions are excluded from the loss: pad tokens and the
        # fixed prompt prefix.
        decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100)
        decoder_targets[:,:self.prompt_length] = -100

        decoder_output = self.text_decoder(text.input_ids,
                                           attention_mask = text.attention_mask,
                                           encoder_hidden_states = image_embeds,
                                           encoder_attention_mask = image_atts,
                                           labels = decoder_targets,
                                           return_dict = True,
                                          )
        loss_lm = decoder_output.loss

        return loss_lm

    def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0):
        """Generate one caption per image.

        sample=True uses nucleus sampling (top_p, fixed repetition penalty
        1.1); sample=False uses beam search with ``num_beams`` beams.  The
        fixed prompt prefix is stripped from the returned strings.
        """
        image_embeds = self.visual_encoder(image)

        if not sample:
            # Beam search expects the encoder states replicated per beam.
            image_embeds = image_embeds.repeat_interleave(num_beams,dim=0)

        image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
        model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask":image_atts}

        prompt = [self.prompt] * image.size(0)
        input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device)
        input_ids[:,0] = self.tokenizer.bos_token_id
        # Drop the trailing [SEP] so generation continues the prompt.
        input_ids = input_ids[:, :-1]

        if sample:
            #nucleus sampling
            outputs = self.text_decoder.generate(input_ids=input_ids,
                                                 max_length=max_length,
                                                 min_length=min_length,
                                                 do_sample=True,
                                                 top_p=top_p,
                                                 num_return_sequences=1,
                                                 eos_token_id=self.tokenizer.sep_token_id,
                                                 pad_token_id=self.tokenizer.pad_token_id,
                                                 repetition_penalty=1.1,
                                                 **model_kwargs)
        else:
            #beam search
            outputs = self.text_decoder.generate(input_ids=input_ids,
                                                 max_length=max_length,
                                                 min_length=min_length,
                                                 num_beams=num_beams,
                                                 eos_token_id=self.tokenizer.sep_token_id,
                                                 pad_token_id=self.tokenizer.pad_token_id,
                                                 repetition_penalty=repetition_penalty,
                                                 **model_kwargs)

        captions = []
        for output in outputs:
            caption = self.tokenizer.decode(output, skip_special_tokens=True)
            # Remove the fixed prompt prefix from the decoded text.
            captions.append(caption[len(self.prompt):])
        return captions
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def blip_decoder(pretrained='', **kwargs):
    """Build a BLIP_Decoder; optionally load weights from ``pretrained``."""
    model = BLIP_Decoder(**kwargs)
    if not pretrained:
        return model
    model, msg = load_checkpoint(model, pretrained)
    assert len(msg.missing_keys) == 0
    return model
|
| 178 |
+
|
| 179 |
+
def blip_feature_extractor(pretrained='', **kwargs):
    """Build a BLIP_Base; optionally load weights from ``pretrained``."""
    model = BLIP_Base(**kwargs)
    if not pretrained:
        return model
    model, msg = load_checkpoint(model, pretrained)
    assert len(msg.missing_keys) == 0
    return model
|
| 185 |
+
|
| 186 |
+
def init_tokenizer():
    """Return a bert-base-uncased tokenizer extended with BLIP special tokens."""
    tok = BertTokenizer.from_pretrained('bert-base-uncased')
    # [DEC] is registered as bos first, then [ENC] as an additional special
    # token — two separate calls keep the vocabulary id assignment identical.
    tok.add_special_tokens({'bos_token': '[DEC]'})
    tok.add_special_tokens({'additional_special_tokens': ['[ENC]']})
    # Cache the [ENC] id so callers can overwrite input_ids[:, 0] with it.
    tok.enc_token_id = tok.additional_special_tokens_ids[0]
    return tok
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0):
    """Build the BLIP vision transformer backbone.

    Returns:
        (visual_encoder, vision_width): the VisionTransformer module and its
        embedding width (768 for 'base', 1024 for 'large').
    """
    assert vit in ['base', 'large'], "vit parameter must be base or large"
    if vit=='base':
        vision_width = 768
        visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12,
                                           num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
                                           # `0 or x` evaluates to x: the caller's rate is used as-is.
                                           drop_path_rate=0 or drop_path_rate
                                          )
    elif vit=='large':
        vision_width = 1024
        visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24,
                                           num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
                                           # NOTE(review): 0.1 is truthy, so this is ALWAYS 0.1 and the
                                           # drop_path_rate argument is silently ignored for 'large'.
                                           drop_path_rate=0.1 or drop_path_rate
                                          )
    return visual_encoder, vision_width
|
| 210 |
+
|
| 211 |
+
def is_url(url_or_filename):
    """Return True when the string parses as an http(s) URL, False otherwise."""
    scheme = urlparse(url_or_filename).scheme
    return scheme == "http" or scheme == "https"
|
| 214 |
+
|
| 215 |
+
def load_checkpoint(model,url_or_filename):
    """Load BLIP weights into ``model`` from a local path or an http(s) URL.

    Positional embeddings are interpolated to the model's resolution, and any
    checkpoint tensor whose shape no longer matches the model is dropped, so
    the non-strict load does not fail on shape mismatches.

    Returns:
        (model, msg): the model (mutated in place) and the load_state_dict
        result listing missing/unexpected keys.
    """
    if is_url(url_or_filename):
        # Download (or reuse a cached copy of) the remote checkpoint.
        cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
        checkpoint = torch.load(cached_file, map_location='cpu')
    elif os.path.isfile(url_or_filename):
        checkpoint = torch.load(url_or_filename, map_location='cpu')
    else:
        raise RuntimeError('checkpoint url or path is invalid')

    state_dict = checkpoint['model']

    # Resize positional embeddings to the current image resolution (also for
    # the momentum encoder when the model has one).
    state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder)
    if 'visual_encoder_m.pos_embed' in model.state_dict().keys():
        state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],
                                                                         model.visual_encoder_m)
    # Drop checkpoint tensors whose shape disagrees with the model.
    for key in model.state_dict().keys():
        if key in state_dict.keys():
            if state_dict[key].shape!=model.state_dict()[key].shape:
                del state_dict[key]

    msg = model.load_state_dict(state_dict,strict=False)
    print('load checkpoint from %s'%url_or_filename)
    return model,msg
|
| 238 |
+
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/models/blip_pretrain.py
ADDED
|
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
* Copyright (c) 2022, salesforce.com, inc.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
* SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
| 6 |
+
* By Junnan Li
|
| 7 |
+
'''
|
| 8 |
+
from models.med import BertConfig, BertModel, BertLMHeadModel
|
| 9 |
+
from transformers import BertTokenizer
|
| 10 |
+
import transformers
|
| 11 |
+
transformers.logging.set_verbosity_error()
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
from torch import nn
|
| 15 |
+
import torch.nn.functional as F
|
| 16 |
+
|
| 17 |
+
from models.blip import create_vit, init_tokenizer, load_checkpoint
|
| 18 |
+
|
| 19 |
+
class BLIP_Pretrain(nn.Module):
    """BLIP pre-training model: contrastive (ITA), matching (ITM) and LM losses,
    with momentum encoders and negative queues in the MoCo style."""

    def __init__(self,
                 med_config = 'configs/bert_config.json',
                 image_size = 224,
                 vit = 'base',
                 vit_grad_ckpt = False,
                 vit_ckpt_layer = 0,
                 embed_dim = 256,
                 queue_size = 57600,
                 momentum = 0.995,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer, 0)

        # Initialize the ViT from ImageNet-pretrained weights (DeiT for base,
        # timm's ViT-L/16 in21k for large).
        if vit=='base':
            checkpoint = torch.hub.load_state_dict_from_url(
                url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
                map_location="cpu", check_hash=True)
            state_dict = checkpoint["model"]
            msg = self.visual_encoder.load_state_dict(state_dict,strict=False)
        elif vit=='large':
            from timm.models.helpers import load_custom_pretrained
            from timm.models.vision_transformer import default_cfgs
            load_custom_pretrained(self.visual_encoder,default_cfgs['vit_large_patch16_224_in21k'])

        self.tokenizer = init_tokenizer()
        encoder_config = BertConfig.from_json_file(med_config)
        encoder_config.encoder_width = vision_width
        self.text_encoder = BertModel.from_pretrained('bert-base-uncased',config=encoder_config, add_pooling_layer=False)
        # Account for the [DEC]/[ENC] tokens added by init_tokenizer().
        self.text_encoder.resize_token_embeddings(len(self.tokenizer))

        text_width = self.text_encoder.config.hidden_size

        # Projection heads mapping both modalities into the shared embed_dim space.
        self.vision_proj = nn.Linear(vision_width, embed_dim)
        self.text_proj = nn.Linear(text_width, embed_dim)

        # Binary head: does this (image, text) pair match?
        self.itm_head = nn.Linear(text_width, 2)

        # create momentum encoders
        self.visual_encoder_m, vision_width = create_vit(vit,image_size)
        self.vision_proj_m = nn.Linear(vision_width, embed_dim)
        self.text_encoder_m = BertModel(config=encoder_config, add_pooling_layer=False)
        self.text_proj_m = nn.Linear(text_width, embed_dim)

        self.model_pairs = [[self.visual_encoder,self.visual_encoder_m],
                            [self.vision_proj,self.vision_proj_m],
                            [self.text_encoder,self.text_encoder_m],
                            [self.text_proj,self.text_proj_m],
                           ]
        self.copy_params()

        # create the queue
        self.register_buffer("image_queue", torch.randn(embed_dim, queue_size))
        self.register_buffer("text_queue", torch.randn(embed_dim, queue_size))
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

        self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
        self.text_queue = nn.functional.normalize(self.text_queue, dim=0)

        self.queue_size = queue_size
        self.momentum = momentum
        # Learnable contrastive temperature, clamped in forward().
        self.temp = nn.Parameter(0.07*torch.ones([]))

        # create the decoder
        decoder_config = BertConfig.from_json_file(med_config)
        decoder_config.encoder_width = vision_width
        self.text_decoder = BertLMHeadModel.from_pretrained('bert-base-uncased',config=decoder_config)
        self.text_decoder.resize_token_embeddings(len(self.tokenizer))
        # Share encoder weights with the decoder (cross-attention excluded).
        tie_encoder_decoder_weights(self.text_encoder,self.text_decoder.bert,'','/attention')


    def forward(self, image, caption, alpha):
        """Return (loss_ita, loss_itm, loss_lm) for a batch.

        alpha blends one-hot similarity targets with the momentum encoders'
        soft targets (knowledge distillation).
        """
        with torch.no_grad():
            self.temp.clamp_(0.001,0.5)

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
        # [CLS]-position embeddings projected and L2-normalized for contrast.
        image_feat = F.normalize(self.vision_proj(image_embeds[:,0,:]),dim=-1)

        text = self.tokenizer(caption, padding='max_length', truncation=True, max_length=30,
                              return_tensors="pt").to(image.device)
        text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask,
                                        return_dict = True, mode = 'text')
        text_feat = F.normalize(self.text_proj(text_output.last_hidden_state[:,0,:]),dim=-1)

        # get momentum features
        with torch.no_grad():
            self._momentum_update()
            image_embeds_m = self.visual_encoder_m(image)
            image_feat_m = F.normalize(self.vision_proj_m(image_embeds_m[:,0,:]),dim=-1)
            # Current momentum features plus the queued negatives.
            image_feat_all = torch.cat([image_feat_m.t(),self.image_queue.clone().detach()],dim=1)

            text_output_m = self.text_encoder_m(text.input_ids, attention_mask = text.attention_mask,
                                                return_dict = True, mode = 'text')
            text_feat_m = F.normalize(self.text_proj_m(text_output_m.last_hidden_state[:,0,:]),dim=-1)
            text_feat_all = torch.cat([text_feat_m.t(),self.text_queue.clone().detach()],dim=1)

            sim_i2t_m = image_feat_m @ text_feat_all / self.temp
            sim_t2i_m = text_feat_m @ image_feat_all / self.temp

            sim_targets = torch.zeros(sim_i2t_m.size()).to(image.device)
            sim_targets.fill_diagonal_(1)

            # Soft targets: alpha * momentum distribution + (1-alpha) * one-hot.
            sim_i2t_targets = alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets
            sim_t2i_targets = alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets

        sim_i2t = image_feat @ text_feat_all / self.temp
        sim_t2i = text_feat @ image_feat_all / self.temp

        loss_i2t = -torch.sum(F.log_softmax(sim_i2t, dim=1)*sim_i2t_targets,dim=1).mean()
        loss_t2i = -torch.sum(F.log_softmax(sim_t2i, dim=1)*sim_t2i_targets,dim=1).mean()

        loss_ita = (loss_i2t+loss_t2i)/2

        self._dequeue_and_enqueue(image_feat_m, text_feat_m)

        ###============== Image-text Matching ===================###
        encoder_input_ids = text.input_ids.clone()
        # Run the text encoder in fusion mode via the [ENC] token.
        encoder_input_ids[:,0] = self.tokenizer.enc_token_id

        # forward the positve image-text pair
        bs = image.size(0)
        output_pos = self.text_encoder(encoder_input_ids,
                                       attention_mask = text.attention_mask,
                                       encoder_hidden_states = image_embeds,
                                       encoder_attention_mask = image_atts,
                                       return_dict = True,
                                      )
        with torch.no_grad():
            # Sampling weights for hard negatives: higher similarity (within
            # the batch, diagonal excluded) -> more likely to be picked.
            weights_t2i = F.softmax(sim_t2i[:,:bs],dim=1)+1e-4
            weights_t2i.fill_diagonal_(0)
            weights_i2t = F.softmax(sim_i2t[:,:bs],dim=1)+1e-4
            weights_i2t.fill_diagonal_(0)

        # select a negative image for each text
        image_embeds_neg = []
        for b in range(bs):
            neg_idx = torch.multinomial(weights_t2i[b], 1).item()
            image_embeds_neg.append(image_embeds[neg_idx])
        image_embeds_neg = torch.stack(image_embeds_neg,dim=0)

        # select a negative text for each image
        text_ids_neg = []
        text_atts_neg = []
        for b in range(bs):
            neg_idx = torch.multinomial(weights_i2t[b], 1).item()
            text_ids_neg.append(encoder_input_ids[neg_idx])
            text_atts_neg.append(text.attention_mask[neg_idx])

        text_ids_neg = torch.stack(text_ids_neg,dim=0)
        text_atts_neg = torch.stack(text_atts_neg,dim=0)

        # Two negative pairings: (pos text, neg image) and (neg text, pos image).
        text_ids_all = torch.cat([encoder_input_ids, text_ids_neg],dim=0)
        text_atts_all = torch.cat([text.attention_mask, text_atts_neg],dim=0)

        image_embeds_all = torch.cat([image_embeds_neg,image_embeds],dim=0)
        image_atts_all = torch.cat([image_atts,image_atts],dim=0)

        output_neg = self.text_encoder(text_ids_all,
                                       attention_mask = text_atts_all,
                                       encoder_hidden_states = image_embeds_all,
                                       encoder_attention_mask = image_atts_all,
                                       return_dict = True,
                                      )

        vl_embeddings = torch.cat([output_pos.last_hidden_state[:,0,:], output_neg.last_hidden_state[:,0,:]],dim=0)
        vl_output = self.itm_head(vl_embeddings)

        # Labels: bs positives followed by 2*bs negatives.
        itm_labels = torch.cat([torch.ones(bs,dtype=torch.long),torch.zeros(2*bs,dtype=torch.long)],
                               dim=0).to(image.device)
        loss_itm = F.cross_entropy(vl_output, itm_labels)

        ##================= LM ========================##
        decoder_input_ids = text.input_ids.clone()
        decoder_input_ids[:,0] = self.tokenizer.bos_token_id
        # -100 positions (pads) are excluded from the LM loss.
        decoder_targets = decoder_input_ids.masked_fill(decoder_input_ids == self.tokenizer.pad_token_id, -100)

        decoder_output = self.text_decoder(decoder_input_ids,
                                           attention_mask = text.attention_mask,
                                           encoder_hidden_states = image_embeds,
                                           encoder_attention_mask = image_atts,
                                           labels = decoder_targets,
                                           return_dict = True,
                                          )

        loss_lm = decoder_output.loss
        return loss_ita, loss_itm, loss_lm



    @torch.no_grad()
    def copy_params(self):
        # Clone online weights into the momentum encoders and freeze them.
        for model_pair in self.model_pairs:
            for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
                param_m.data.copy_(param.data) # initialize
                param_m.requires_grad = False # not update by gradient


    @torch.no_grad()
    def _momentum_update(self):
        # EMA update of the momentum encoders.
        for model_pair in self.model_pairs:
            for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
                param_m.data = param_m.data * self.momentum + param.data * (1. - self.momentum)


    @torch.no_grad()
    def _dequeue_and_enqueue(self, image_feat, text_feat):
        # gather keys before updating queue
        image_feats = concat_all_gather(image_feat)
        text_feats = concat_all_gather(text_feat)

        batch_size = image_feats.shape[0]

        ptr = int(self.queue_ptr)
        assert self.queue_size % batch_size == 0  # for simplicity

        # replace the keys at ptr (dequeue and enqueue)
        self.image_queue[:, ptr:ptr + batch_size] = image_feats.T
        self.text_queue[:, ptr:ptr + batch_size] = text_feats.T
        ptr = (ptr + batch_size) % self.queue_size  # move pointer

        self.queue_ptr[0] = ptr
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def blip_pretrain(**kwargs):
    """Factory: build a BLIP_Pretrain model, forwarding all keyword arguments."""
    return BLIP_Pretrain(**kwargs)
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
@torch.no_grad()
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
from typing import List
|
| 270 |
+
def tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, skip_key:str):
    """Tie (share) the decoder's weights onto the encoder's matching submodules.

    Walks both module trees in parallel and rebinds every encoder
    ``weight``/``bias`` Parameter to the decoder's, except for modules whose
    qualified name contains ``skip_key`` (used to leave e.g. the decoder's
    cross-attention untied).  Adapted from HuggingFace's
    ``PreTrainedModel._tie_encoder_decoder_weights``.

    Args:
        encoder: module whose parameters will be replaced by the decoder's.
        decoder: module providing the shared parameters.
        base_model_prefix: name used as the root of the qualified module path.
        skip_key: substring of a module path that exempts it from tying.
    """
    # Encoder submodules that had no decoder counterpart (collected for
    # diagnostics; currently not returned or logged).
    uninitialized_encoder_weights: List[str] = []
    if decoder.__class__ != encoder.__class__:
        logger.info(
            f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
        )

    def tie_encoder_to_decoder_recursively(
        decoder_pointer: nn.Module,
        encoder_pointer: nn.Module,
        module_name: str,
        uninitialized_encoder_weights: List[str],
        skip_key: str,
        depth=0,
    ):
        assert isinstance(decoder_pointer, nn.Module) and isinstance(
            encoder_pointer, nn.Module
        ), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
        # Leaf case: a module that owns a `weight` gets its parameters shared,
        # unless its path is explicitly skipped.
        if hasattr(decoder_pointer, "weight") and skip_key not in module_name:
            assert hasattr(encoder_pointer, "weight")
            encoder_pointer.weight = decoder_pointer.weight
            if hasattr(decoder_pointer, "bias"):
                assert hasattr(encoder_pointer, "bias")
                encoder_pointer.bias = decoder_pointer.bias
            print(module_name+' is tied')
            return

        encoder_modules = encoder_pointer._modules
        decoder_modules = decoder_pointer._modules
        if len(decoder_modules) > 0:
            assert (
                len(encoder_modules) > 0
            ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"

            all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
            # Offset applied to numeric (ModuleList index) names when the two
            # trees have diverged in length.
            encoder_layer_pos = 0
            for name, module in decoder_modules.items():
                if name.isdigit():
                    encoder_name = str(int(name) + encoder_layer_pos)
                    decoder_name = name
                    if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
                        encoder_modules
                    ) != len(decoder_modules):
                        # this can happen if the name corresponds to the position in a list module list of layers
                        # in this case the decoder has added a cross-attention that the encoder does not have
                        # thus skip this step and subtract one layer pos from encoder
                        encoder_layer_pos -= 1
                        continue
                elif name not in encoder_modules:
                    continue
                elif depth > 500:
                    raise ValueError(
                        "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
                    )
                else:
                    decoder_name = encoder_name = name
                tie_encoder_to_decoder_recursively(
                    decoder_modules[decoder_name],
                    encoder_modules[encoder_name],
                    module_name + "/" + name,
                    uninitialized_encoder_weights,
                    skip_key,
                    depth=depth + 1,
                )
                all_encoder_weights.remove(module_name + "/" + encoder_name)

            uninitialized_encoder_weights += list(all_encoder_weights)

    # tie weights recursively
    tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights, skip_key)
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/models/blip_vqa.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
from models.med import BertConfig, BertModel, BertLMHeadModel
|
| 3 |
+
from models.blip import create_vit, init_tokenizer, load_checkpoint
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch import nn
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
from transformers import BertTokenizer
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class BLIP_VQA(nn.Module):
    """BLIP visual question answering model.

    A ViT visual encoder feeds a BERT-style multimodal text encoder
    (question conditioned on image via cross-attention) and a BERT LM head
    decoder that produces/scores the answer.
    """

    def __init__(self,
                 med_config= '/mnt/petrelfs/zhangfan.p/zhangfan/evaluate-agent/agent/eval_tools/t2i_comp/BLIPvqa_eval/configs/med_config.json', #'configs/med_config.json', # todo
                 # NOTE(review): the default above is a machine-specific absolute
                 # path — callers on other hosts must pass med_config explicitly.
                 image_size=480,
                 vit='base',
                 vit_grad_ckpt=False,
                 vit_ckpt_layer=0,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
            vit_grad_ckpt (bool): enable gradient checkpointing in the ViT
            vit_ckpt_layer (int): first ViT layer to checkpoint
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer,
                                                       drop_path_rate=0.1)
        self.tokenizer = init_tokenizer()

        # Text encoder's cross-attention width must match the ViT output width.
        encoder_config = BertConfig.from_json_file(med_config)
        encoder_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=encoder_config, add_pooling_layer=False)

        decoder_config = BertConfig.from_json_file(med_config)
        self.text_decoder = BertLMHeadModel(config=decoder_config)

    def forward(self, image, question, answer=None, n=None, weights=None, train=True, inference='rank', k_test=128):
        """Train: return the weighted answer-LM loss. Eval: decode, rank, or
        score yes/no answers depending on `inference`.

        Args:
            image: image batch accepted by the visual encoder.
            question: list of question strings (tokenized here).
            answer: training — list of answer strings; eval 'rank' —
                assumed to be a pre-tokenized candidate batch with
                .input_ids/.attention_mask (TODO confirm against caller).
            n: training — number of answers per question.
            weights: training — per-answer weight.
            train (bool): selects training vs. inference path.
            inference (str): 'generate' | 'rank' | 'vqa_prob'.
            k_test (int): top-k candidates kept in 'rank' mode.
        """
        image_embeds = self.visual_encoder(image)
        # All image tokens are valid (no padding in the visual sequence).
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)

        question = self.tokenizer(question, padding='longest', truncation=True, max_length=35,
                                  return_tensors="pt").to(image.device)
        # Replace [CLS] with the encoder token so the text encoder runs in
        # multimodal (image-conditioned) mode.
        question.input_ids[:, 0] = self.tokenizer.enc_token_id

        if train:
            '''
            n: number of answers for each question
            weights: weight for each answer
            '''
            answer = self.tokenizer(answer, padding='longest', return_tensors="pt").to(image.device)
            answer.input_ids[:, 0] = self.tokenizer.bos_token_id
            # Ignore padding positions in the LM loss.
            answer_targets = answer.input_ids.masked_fill(answer.input_ids == self.tokenizer.pad_token_id, -100)

            question_output = self.text_encoder(question.input_ids,
                                                attention_mask=question.attention_mask,
                                                encoder_hidden_states=image_embeds,
                                                encoder_attention_mask=image_atts,
                                                return_dict=True)

            # Duplicate each question's states once per answer so questions
            # with multiple answers align with the flattened answer batch.
            # NOTE(review): the loop variable shadows the parameter `n`;
            # works, but fragile if code after the loop ever reads `n`.
            question_states = []
            question_atts = []
            for b, n in enumerate(n):
                question_states += [question_output.last_hidden_state[b]] * n
                question_atts += [question.attention_mask[b]] * n
            question_states = torch.stack(question_states, 0)
            question_atts = torch.stack(question_atts, 0)

            answer_output = self.text_decoder(answer.input_ids,
                                              attention_mask=answer.attention_mask,
                                              encoder_hidden_states=question_states,
                                              encoder_attention_mask=question_atts,
                                              labels=answer_targets,
                                              return_dict=True,
                                              reduction='none',
                                              )

            # Per-answer loss weighted (e.g. by answer confidence), averaged
            # over images rather than over answers.
            loss = weights * answer_output.loss
            loss = loss.sum() / image.size(0)

            return loss


        else:
            question_output = self.text_encoder(question.input_ids,
                                                attention_mask=question.attention_mask,
                                                encoder_hidden_states=image_embeds,
                                                encoder_attention_mask=image_atts,
                                                return_dict=True)

            if inference == 'generate':
                # Free-form decoding with beam search; question states are
                # repeated per beam as required by HF generate().
                num_beams = 3
                question_states = question_output.last_hidden_state.repeat_interleave(num_beams, dim=0)
                question_atts = torch.ones(question_states.size()[:-1], dtype=torch.long).to(question_states.device)
                model_kwargs = {"encoder_hidden_states": question_states, "encoder_attention_mask": question_atts}

                bos_ids = torch.full((image.size(0), 1), fill_value=self.tokenizer.bos_token_id, device=image.device)

                outputs = self.text_decoder.generate(input_ids=bos_ids,
                                                     max_length=10,
                                                     min_length=1,
                                                     num_beams=num_beams,
                                                     eos_token_id=self.tokenizer.sep_token_id,
                                                     pad_token_id=self.tokenizer.pad_token_id,
                                                     **model_kwargs)

                answers = []
                for output in outputs:
                    answer = self.tokenizer.decode(output, skip_special_tokens=True)
                    answers.append(answer)
                return answers

            elif inference == 'rank':
                # NOTE(review): relies on `answer` already being tokenized
                # (has .input_ids) in this mode — confirm against callers.
                max_ids = self.rank_answer(question_output.last_hidden_state, question.attention_mask,
                                           answer.input_ids, answer.attention_mask, k_test)
                return max_ids

            elif inference == 'vqa_prob':
                # Score P(yes) vs P(no) for each question, formatted as strings.
                answer_prob = []
                probs = self.vqa_prob(question_output.last_hidden_state, question.attention_mask,
                                      )
                for p in probs:
                    answer = '{:.4f}'.format(p)
                    answer_prob.append(str(answer))
                return answer_prob

            # NOTE(review): unreachable for the modes above; raises NameError
            # for any other `inference` value since answer_prob is unbound.
            return answer_prob



    def vqa_prob(self, question_states, question_atts):
        """Return P(yes) / (P(yes) + P(no)) for the decoder's first answer token.

        NOTE(review): token ids are hardcoded — 30522 presumably the added
        [DEC]/bos id, 2748 "yes" and 2053 "no" in bert-base-uncased's vocab;
        verify against the tokenizer actually in use.
        """
        num_ques = question_states.size(0)
        start_ids = torch.tensor(30522).to(device=question_states.device).repeat(num_ques, 1)  # bos token

        start_output = self.text_decoder(start_ids,
                                         encoder_hidden_states=question_states,
                                         encoder_attention_mask=question_atts,
                                         return_dict=True,
                                         reduction='none')
        logits = start_output.logits[:, 0, :]  # first token's logit
        prob = torch.softmax(logits, dim=1)  # (batchsize,30524)
        prob_yes = prob[:, 2748]
        prob_no = prob[:, 2053]
        return prob_yes / (prob_yes + prob_no)  # tensor(batch_size,)

    def rank_answer(self, question_states, question_atts, answer_ids, answer_atts, k):
        """Pick, per question, the candidate answer with the highest LM score.

        Two-stage ranking: first shortlist the top-k candidates by the
        probability of their first token, then fully score those k answers
        with the decoder and return the argmax candidate indices.
        """
        num_ques = question_states.size(0)
        start_ids = answer_ids[0, 0].repeat(num_ques, 1)  # bos token

        start_output = self.text_decoder(start_ids,
                                         encoder_hidden_states=question_states,
                                         encoder_attention_mask=question_atts,
                                         return_dict=True,
                                         reduction='none')
        logits = start_output.logits[:, 0, :]  # first token's logit

        # topk_probs: top-k probability
        # topk_ids: [num_question, k]
        answer_first_token = answer_ids[:, 1]
        prob_first_token = F.softmax(logits, dim=1).index_select(dim=1, index=answer_first_token)
        topk_probs, topk_ids = prob_first_token.topk(k, dim=1)

        # answer input: [num_question*k, answer_len]
        input_ids = []
        input_atts = []
        for b, topk_id in enumerate(topk_ids):
            input_ids.append(answer_ids.index_select(dim=0, index=topk_id))
            input_atts.append(answer_atts.index_select(dim=0, index=topk_id))
        input_ids = torch.cat(input_ids, dim=0)
        input_atts = torch.cat(input_atts, dim=0)

        targets_ids = input_ids.masked_fill(input_ids == self.tokenizer.pad_token_id, -100)

        # repeat encoder's output for top-k answers
        question_states = tile(question_states, 0, k)
        question_atts = tile(question_atts, 0, k)

        output = self.text_decoder(input_ids,
                                   attention_mask=input_atts,
                                   encoder_hidden_states=question_states,
                                   encoder_attention_mask=question_atts,
                                   labels=targets_ids,
                                   return_dict=True,
                                   reduction='none')

        # reduction='none' gives per-answer NLL; negate to get log-probs.
        log_probs_sum = -output.loss
        log_probs_sum = log_probs_sum.view(num_ques, k)

        max_topk_ids = log_probs_sum.argmax(dim=1)
        # `max_topk_ids >= 0` is always true — effectively row-wise gather.
        max_ids = topk_ids[max_topk_ids >= 0, max_topk_ids]

        return max_ids
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def blip_vqa(pretrained='', **kwargs):
    """Factory: build a BLIP_VQA model, optionally loading a checkpoint.

    Args:
        pretrained (str): checkpoint path/URL; empty string skips loading.
        **kwargs: forwarded to the BLIP_VQA constructor.
    """
    model = BLIP_VQA(**kwargs)
    if not pretrained:
        return model
    model, msg = load_checkpoint(model, pretrained)
    # assert(len(msg.missing_keys)==0)
    return model
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def tile(x, dim, n_tile):
    """Repeat each slice of `x` along `dim` `n_tile` times consecutively.

    E.g. rows [a, b] with n_tile=2 become [a, a, b, b].  The previous
    implementation built an explicit gather index on the host with NumPy
    (repeat + index_select); `torch.repeat_interleave` produces the
    identical result in a single native call, with no CPU round-trip.

    Args:
        x (Tensor): input tensor.
        dim (int): dimension along which to repeat.
        n_tile (int): number of consecutive copies of each slice.

    Returns:
        Tensor with size(dim) == x.size(dim) * n_tile.
    """
    return torch.repeat_interleave(x, n_tile, dim=dim)
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/models/med.py
ADDED
|
@@ -0,0 +1,955 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
* Copyright (c) 2022, salesforce.com, inc.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
* SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
| 6 |
+
* By Junnan Li
|
| 7 |
+
* Based on huggingface code base
|
| 8 |
+
* https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
|
| 9 |
+
'''
|
| 10 |
+
|
| 11 |
+
import math
|
| 12 |
+
import os
|
| 13 |
+
import warnings
|
| 14 |
+
from dataclasses import dataclass
|
| 15 |
+
from typing import Optional, Tuple
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
from torch import Tensor, device, dtype, nn
|
| 19 |
+
import torch.utils.checkpoint
|
| 20 |
+
from torch import nn
|
| 21 |
+
from torch.nn import CrossEntropyLoss
|
| 22 |
+
import torch.nn.functional as F
|
| 23 |
+
|
| 24 |
+
from transformers.activations import ACT2FN
|
| 25 |
+
from transformers.file_utils import (
|
| 26 |
+
ModelOutput,
|
| 27 |
+
)
|
| 28 |
+
from transformers.modeling_outputs import (
|
| 29 |
+
BaseModelOutputWithPastAndCrossAttentions,
|
| 30 |
+
BaseModelOutputWithPoolingAndCrossAttentions,
|
| 31 |
+
CausalLMOutputWithCrossAttentions,
|
| 32 |
+
MaskedLMOutput,
|
| 33 |
+
MultipleChoiceModelOutput,
|
| 34 |
+
NextSentencePredictorOutput,
|
| 35 |
+
QuestionAnsweringModelOutput,
|
| 36 |
+
SequenceClassifierOutput,
|
| 37 |
+
TokenClassifierOutput,
|
| 38 |
+
)
|
| 39 |
+
from transformers.modeling_utils import (
|
| 40 |
+
PreTrainedModel,
|
| 41 |
+
apply_chunking_to_forward,
|
| 42 |
+
find_pruneable_heads_and_indices,
|
| 43 |
+
prune_linear_layer,
|
| 44 |
+
)
|
| 45 |
+
from transformers.utils import logging
|
| 46 |
+
from transformers.models.bert.configuration_bert import BertConfig
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
logger = logging.get_logger(__name__)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

        self.config = config

    def forward(
        self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        # Either token ids or precomputed embeddings must be supplied.
        if input_ids is None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            input_shape = input_ids.size()
        seq_length = input_shape[1]

        # Default position ids continue from any cached past tokens.
        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds
        if self.position_embedding_type == "absolute":
            embeddings += self.position_embeddings(position_ids)
        return self.dropout(self.LayerNorm(embeddings))
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class BertSelfAttention(nn.Module):
    """Multi-head (self or cross) attention core of the BERT/MED layer.

    When built with ``is_cross_attention``, keys/values are projected from
    encoder hidden states (width ``config.encoder_width``) instead of the
    decoder's own hidden states.  Also supports cached past key/values and
    optional relative position embeddings, plus hooks for saving attention
    maps and their gradients (used for Grad-CAM-style visualization).
    """

    def __init__(self, config, is_cross_attention):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            # Keys/values come from the (vision) encoder, whose width may
            # differ from the text hidden size.
            self.key = nn.Linear(config.encoder_width, self.all_head_size)
            self.value = nn.Linear(config.encoder_width, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        # When True, cross-attention maps (and their gradients) are recorded.
        self.save_attention = False

    def save_attn_gradients(self, attn_gradients):
        # Backward hook target: stores gradients of the attention map.
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Incremental decoding: append new keys/values to the cache.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Returned unconditionally so callers can cache for the next step.
        past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            # Shift distances into [0, 2*max_pos-2] to index the embedding.
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs_dropped = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask

        context_layer = torch.matmul(attention_probs_dropped, value_layer)

        # (batch, heads, seq, head_size) -> (batch, seq, all_head)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        outputs = outputs + (past_key_value,)
        return outputs
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
class BertSelfOutput(nn.Module):
    """Projects self-attention output back to hidden size, then applies
    dropout and a residual LayerNorm (post-LN, as in the original BERT)."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Combines BertSelfAttention with its output projection and supports
    attention-head pruning. With ``is_cross_attention`` set, the inner
    attention module attends to encoder states instead of the input."""

    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.self = BertSelfAttention(config, is_cross_attention)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given heads and shrink the projection layers to match."""
        if not heads:
            return
        prunable, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Shrink the Q/K/V projections and the output projection accordingly.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Keep head-count bookkeeping in sync and remember what was pruned.
        self.self.num_attention_heads = self.self.num_attention_heads - len(prunable)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(prunable)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        attn_results = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # Project the context vectors and apply the residual connection.
        attention_output = self.output(attn_results[0], hidden_states)
        # Forward attention probabilities / cache entries unchanged.
        return (attention_output,) + attn_results[1:]
class BertIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size plus activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act may be a string key into ACT2FN or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size with
    dropout and a residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(contracted + input_tensor)
class BertLayer(nn.Module):
    """One transformer block: self-attention, optional cross-attention
    (run only when mode == 'multimodal'), and a chunked feed-forward
    sub-layer."""

    def __init__(self, config, layer_num):
        super().__init__()
        self.config = config
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Feed-forward chunking splits along the sequence dimension (dim 1).
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        self.layer_num = layer_num
        if self.config.add_cross_attention:
            self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        mode=None,
    ):
        """Run self-attention (with optional KV cache), then cross-attention
        when mode == 'multimodal', then the feed-forward sub-layer.

        Returns a tuple ``(layer_output, [attention probs...], present_key_value)``.
        """
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # Middle elements are attention probabilities; the last is the KV cache.
        outputs = self_attention_outputs[1:-1]
        present_key_value = self_attention_outputs[-1]

        if mode=='multimodal':
            assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"

            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                output_attentions=output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
        # Chunk the feed-forward computation over the sequence dimension to bound memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # Intermediate expansion + output projection with residual LayerNorm;
        # called per chunk by apply_chunking_to_forward.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BertEncoder(nn.Module):
    """Stack of ``config.num_hidden_layers`` BertLayer modules.

    ``mode`` selects whether each layer runs its cross-attention branch
    ('multimodal') or only text self-attention.

    Fixes relative to the original:
      * ``logger.warn`` (deprecated alias) replaced with ``logger.warning``.
      * The gradient-checkpointing path previously passed ``mode=mode`` as a
        keyword argument to ``torch.utils.checkpoint.checkpoint``, which does
        not forward arbitrary kwargs to the wrapped function (reentrant mode
        raises ``ValueError: Unexpected keyword arguments``). ``mode`` is now
        bound inside the ``custom_forward`` closure, like ``past_key_value``
        and ``output_attentions``.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config, i) for i in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        mode='multimodal',
    ):
        """Run every layer in order, optionally collecting per-layer hidden
        states, attention maps and key/value caches.

        Returns a BaseModelOutputWithPastAndCrossAttentions when
        ``return_dict`` is True, otherwise a tuple of the non-None values.
        NOTE(review): ``all_cross_attentions`` is allocated but never filled
        here (cross-attention maps ride along in ``all_self_attentions``) —
        kept as-is to preserve output layout.
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None

        for i in range(self.config.num_hidden_layers):
            layer_module = self.layer[i]
            if output_hidden_states:
                # Hidden state *entering* layer i (the final state is added after the loop).
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                if use_cache:
                    # Caching is incompatible with re-computing activations in backward.
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    # Bind the non-tensor arguments (including `mode`) in the
                    # closure: checkpoint() only forwards positional tensors.
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions, mode)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                    mode=mode,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                # Each layer returns its present key/value cache as the last element.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class BertPooler(nn.Module):
    """Builds a fixed-size sequence embedding from the first token's state."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" is simply dense + tanh applied to the first ([CLS]) token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertPredictionHeadTransform(nn.Module):
|
| 502 |
+
def __init__(self, config):
|
| 503 |
+
super().__init__()
|
| 504 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
| 505 |
+
if isinstance(config.hidden_act, str):
|
| 506 |
+
self.transform_act_fn = ACT2FN[config.hidden_act]
|
| 507 |
+
else:
|
| 508 |
+
self.transform_act_fn = config.hidden_act
|
| 509 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
| 510 |
+
|
| 511 |
+
def forward(self, hidden_states):
|
| 512 |
+
hidden_states = self.dense(hidden_states)
|
| 513 |
+
hidden_states = self.transform_act_fn(hidden_states)
|
| 514 |
+
hidden_states = self.LayerNorm(hidden_states)
|
| 515 |
+
return hidden_states
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
class BertLMPredictionHead(nn.Module):
    """Transforms hidden states and decodes them into vocabulary logits."""

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The decoder weight is typically tied to the input embeddings;
        # the bias is a separate, output-only parameter per token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Link the two so `resize_token_embeddings` resizes the bias as well.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class BertOnlyMLMHead(nn.Module):
    """Thin wrapper exposing only the MLM prediction sub-module."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        # Map final hidden states to per-token vocabulary logits.
        return self.predictions(sequence_output)
class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BertConfig
    base_model_prefix = "bert"
    # position_ids are buffers recreated at load time; missing keys are expected.
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Runs in addition to the normal_ init above: Linear biases always start at zero.
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)

        self.encoder = BertEncoder(config)

        # Pooler is optional; token-level heads (e.g. LM head) skip it.
        self.pooler = BertPooler(config) if add_pooling_layer else None

        self.init_weights()


    def get_input_embeddings(self):
        # Word-embedding table (may be weight-tied with an output decoder).
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)


    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device: (:obj:`torch.device`):
                The device of the input to the model.
            is_decoder (:obj:`bool`):
                When True, a causal (lower-triangular) mask is combined with the padding mask.

        Returns:
            :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if is_decoder:
                batch_size, seq_length = input_shape

                seq_ids = torch.arange(seq_length, device=device)
                # Lower-triangular matrix: position i may attend only to positions <= i.
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)

                if causal_mask.shape[1] < attention_mask.shape[1]:
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    # Cached (past) positions are always visible to the new tokens.
                    causal_mask = torch.cat(
                        [
                            torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype),
                            causal_mask,
                        ],
                        axis=-1,
                    )

                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
                    input_shape, attention_mask.shape
                )
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=False,
        mode='multimodal',
    ):
        r"""
        encoder_hidden_states  (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Caching only makes sense when running as a decoder.
        if is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        # Exactly one of input_ids / inputs_embeds / encoder_embeds must be given.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif encoder_embeds is not None:
            input_shape = encoder_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = encoder_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            # Default to attending everywhere, including the cached prefix.
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
                                                                                 device, is_decoder)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_hidden_states is not None:
            # A list of encoder states (one per layer/source) gets a list of masks.
            if type(encoder_hidden_states) == list:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
            else:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)

            if type(encoder_attention_mask) == list:
                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
            elif encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        if encoder_embeds is None:
            embedding_output = self.embeddings(
                input_ids=input_ids,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                past_key_values_length=past_key_values_length,
            )
        else:
            # Caller supplied already-embedded inputs; skip the embedding layer.
            embedding_output = encoder_embeds

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            mode=mode,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
class BertLMHeadModel(BertPreTrainedModel):
|
| 812 |
+
|
| 813 |
+
_keys_to_ignore_on_load_unexpected = [r"pooler"]
|
| 814 |
+
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
|
| 815 |
+
|
| 816 |
+
def __init__(self, config):
|
| 817 |
+
super().__init__(config)
|
| 818 |
+
|
| 819 |
+
self.bert = BertModel(config, add_pooling_layer=False)
|
| 820 |
+
self.cls = BertOnlyMLMHead(config)
|
| 821 |
+
|
| 822 |
+
self.init_weights()
|
| 823 |
+
|
| 824 |
+
def get_output_embeddings(self):
|
| 825 |
+
return self.cls.predictions.decoder
|
| 826 |
+
|
| 827 |
+
def set_output_embeddings(self, new_embeddings):
|
| 828 |
+
self.cls.predictions.decoder = new_embeddings
|
| 829 |
+
|
| 830 |
+
def forward(
|
| 831 |
+
self,
|
| 832 |
+
input_ids=None,
|
| 833 |
+
attention_mask=None,
|
| 834 |
+
position_ids=None,
|
| 835 |
+
head_mask=None,
|
| 836 |
+
inputs_embeds=None,
|
| 837 |
+
encoder_hidden_states=None,
|
| 838 |
+
encoder_attention_mask=None,
|
| 839 |
+
labels=None,
|
| 840 |
+
past_key_values=None,
|
| 841 |
+
use_cache=None,
|
| 842 |
+
output_attentions=None,
|
| 843 |
+
output_hidden_states=None,
|
| 844 |
+
return_dict=None,
|
| 845 |
+
return_logits=False,
|
| 846 |
+
is_decoder=True,
|
| 847 |
+
reduction='mean',
|
| 848 |
+
mode='multimodal',
|
| 849 |
+
):
|
| 850 |
+
r"""
|
| 851 |
+
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
|
| 852 |
+
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
|
| 853 |
+
the model is configured as a decoder.
|
| 854 |
+
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
|
| 855 |
+
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
|
| 856 |
+
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
|
| 857 |
+
- 1 for tokens that are **not masked**,
|
| 858 |
+
- 0 for tokens that are **masked**.
|
| 859 |
+
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
|
| 860 |
+
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
|
| 861 |
+
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
|
| 862 |
+
ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]``
|
| 863 |
+
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
|
| 864 |
+
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
|
| 865 |
+
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
|
| 866 |
+
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
|
| 867 |
+
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
|
| 868 |
+
use_cache (:obj:`bool`, `optional`):
|
| 869 |
+
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
|
| 870 |
+
decoding (see :obj:`past_key_values`).
|
| 871 |
+
Returns:
|
| 872 |
+
Example::
|
| 873 |
+
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
|
| 874 |
+
>>> import torch
|
| 875 |
+
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
|
| 876 |
+
>>> config = BertConfig.from_pretrained("bert-base-cased")
|
| 877 |
+
>>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
|
| 878 |
+
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
|
| 879 |
+
>>> outputs = model(**inputs)
|
| 880 |
+
>>> prediction_logits = outputs.logits
|
| 881 |
+
"""
|
| 882 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 883 |
+
if labels is not None:
|
| 884 |
+
use_cache = False
|
| 885 |
+
|
| 886 |
+
outputs = self.bert(
|
| 887 |
+
input_ids,
|
| 888 |
+
attention_mask=attention_mask,
|
| 889 |
+
position_ids=position_ids,
|
| 890 |
+
head_mask=head_mask,
|
| 891 |
+
inputs_embeds=inputs_embeds,
|
| 892 |
+
encoder_hidden_states=encoder_hidden_states,
|
| 893 |
+
encoder_attention_mask=encoder_attention_mask,
|
| 894 |
+
past_key_values=past_key_values,
|
| 895 |
+
use_cache=use_cache,
|
| 896 |
+
output_attentions=output_attentions,
|
| 897 |
+
output_hidden_states=output_hidden_states,
|
| 898 |
+
return_dict=return_dict,
|
| 899 |
+
is_decoder=is_decoder,
|
| 900 |
+
mode=mode,
|
| 901 |
+
)
|
| 902 |
+
|
| 903 |
+
sequence_output = outputs[0]
|
| 904 |
+
prediction_scores = self.cls(sequence_output)
|
| 905 |
+
|
| 906 |
+
if return_logits:
|
| 907 |
+
return prediction_scores[:, :-1, :].contiguous()
|
| 908 |
+
|
| 909 |
+
lm_loss = None
|
| 910 |
+
if labels is not None:
|
| 911 |
+
# we are doing next-token prediction; shift prediction scores and input ids by one
|
| 912 |
+
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
|
| 913 |
+
labels = labels[:, 1:].contiguous()
|
| 914 |
+
loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
|
| 915 |
+
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
|
| 916 |
+
if reduction=='none':
|
| 917 |
+
lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1)
|
| 918 |
+
|
| 919 |
+
if not return_dict:
|
| 920 |
+
output = (prediction_scores,) + outputs[2:]
|
| 921 |
+
return ((lm_loss,) + output) if lm_loss is not None else output
|
| 922 |
+
|
| 923 |
+
return CausalLMOutputWithCrossAttentions(
|
| 924 |
+
loss=lm_loss,
|
| 925 |
+
logits=prediction_scores,
|
| 926 |
+
past_key_values=outputs.past_key_values,
|
| 927 |
+
hidden_states=outputs.hidden_states,
|
| 928 |
+
attentions=outputs.attentions,
|
| 929 |
+
cross_attentions=outputs.cross_attentions,
|
| 930 |
+
)
|
| 931 |
+
|
| 932 |
+
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
    """Assemble the keyword arguments for a single decoding step.

    Returns the dict of tensors the generation loop feeds to forward();
    cross-attention inputs are passed through from model_kwargs.
    """
    # Default to attending over every position when no mask is supplied.
    if attention_mask is None:
        attention_mask = input_ids.new_ones(input_ids.shape)

    # When a KV cache exists, only the most recent token must be re-processed.
    if past is not None:
        input_ids = input_ids[:, -1:]

    return dict(
        input_ids=input_ids,
        attention_mask=attention_mask,
        past_key_values=past,
        encoder_hidden_states=model_kwargs.get("encoder_hidden_states", None),
        encoder_attention_mask=model_kwargs.get("encoder_attention_mask", None),
        is_decoder=True,
    )
|
| 950 |
+
|
| 951 |
+
def _reorder_cache(self, past, beam_idx):
|
| 952 |
+
reordered_past = ()
|
| 953 |
+
for layer_past in past:
|
| 954 |
+
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
|
| 955 |
+
return reordered_past
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/models/vit.py
ADDED
|
@@ -0,0 +1,305 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
* Copyright (c) 2022, salesforce.com, inc.
|
| 3 |
+
* All rights reserved.
|
| 4 |
+
* SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
| 6 |
+
* By Junnan Li
|
| 7 |
+
* Based on timm code base
|
| 8 |
+
* https://github.com/rwightman/pytorch-image-models/tree/master/timm
|
| 9 |
+
'''
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
import torch.nn as nn
|
| 13 |
+
import torch.nn.functional as F
|
| 14 |
+
from functools import partial
|
| 15 |
+
|
| 16 |
+
from timm.models.vision_transformer import _cfg, PatchEmbed
|
| 17 |
+
from timm.models.registry import register_model
|
| 18 |
+
from timm.models.layers import trunc_normal_, DropPath
|
| 19 |
+
from timm.models.helpers import named_apply, adapt_input_conv
|
| 20 |
+
|
| 21 |
+
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
|
| 22 |
+
|
| 23 |
+
class Mlp(nn.Module):
    """ MLP as used in Vision Transformer, MLP-Mixer and related networks:
    Linear -> activation -> dropout -> Linear -> dropout.
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Hidden and output widths default to the input width when not given.
        hidden = hidden_features or in_features
        out = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden, out)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class Attention(nn.Module):
    """Multi-head self-attention with optional hooks that capture the
    attention map and its gradients (used for attribution/visualization)."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or (dim // num_heads) ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn_gradients = None
        self.attention_map = None

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def forward(self, x, register_hook=False):
        batch, tokens, channels = x.shape
        # Project once to q/k/v and split heads: (3, B, heads, N, head_dim).
        qkv = (self.qkv(x)
               .reshape(batch, tokens, 3, self.num_heads, channels // self.num_heads)
               .permute(2, 0, 3, 1, 4))
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        scores = (q @ k.transpose(-2, -1)) * self.scale
        scores = scores.softmax(dim=-1)
        scores = self.attn_drop(scores)

        # Optionally stash the attention map and hook its gradient.
        if register_hook:
            self.save_attention_map(scores)
            scores.register_hook(self.save_attn_gradients)

        out = (scores @ v).transpose(1, 2).reshape(batch, tokens, channels)
        out = self.proj(out)
        return self.proj_drop(out)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class Block(nn.Module):
    """Pre-norm transformer encoder block: self-attention then MLP,
    each wrapped in a residual connection with optional stochastic depth."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)

        # Trade compute for memory by checkpointing the two sub-modules.
        if use_grad_checkpointing:
            self.attn = checkpoint_wrapper(self.attn)
            self.mlp = checkpoint_wrapper(self.mlp)

    def forward(self, x, register_hook=False):
        x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
        return x + self.drop_path(self.mlp(self.norm2(x)))
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class VisionTransformer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
        https://arxiv.org/abs/2010.11929
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
                 use_grad_checkpointing=False, ckpt_layer=0):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
            use_grad_checkpointing (bool): wrap the last `ckpt_layer` blocks in activation checkpointing
            ckpt_layer (int): number of trailing blocks to checkpoint when enabled
        """
        super().__init__()
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)

        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)

        num_patches = self.patch_embed.num_patches

        # Learned [CLS] token and position embedding (one slot per patch + CLS).
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer)
            )
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)

        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # timm-style init: truncated-normal linear weights, unit LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # These parameters are excluded from weight decay by the optimizer setup.
        return {'pos_embed', 'cls_token'}

    def forward(self, x, register_blk=-1):
        """Encode an image batch; returns per-token features with CLS first.

        register_blk: index of the block whose attention map/gradients are
        captured via hooks (-1 disables capture).
        """
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)

        x = x + self.pos_embed[:,:x.size(1),:]
        x = self.pos_drop(x)

        for i,blk in enumerate(self.blocks):
            x = blk(x, register_blk==i)
        x = self.norm(x)

        return x

    @torch.jit.ignore()
    def load_pretrained(self, checkpoint_path, prefix=''):
        # Load official JAX/Flax ViT weights (.npz) into this model.
        _load_weights(self, checkpoint_path, prefix)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
    """ Load weights from .npz checkpoints for official Google Brain Flax implementation
    """
    import numpy as np

    def _n2p(w, t=True):
        # Convert a Flax array to a torch tensor; with t=True, transpose
        # conv/linear kernels into PyTorch's (out, in, ...) layout.
        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
            w = w.flatten()
        if t:
            if w.ndim == 4:
                w = w.transpose([3, 2, 0, 1])
            elif w.ndim == 3:
                w = w.transpose([2, 0, 1])
            elif w.ndim == 2:
                w = w.transpose([1, 0])
        return torch.from_numpy(w)

    w = np.load(checkpoint_path)
    # Checkpoints saved with an optimizer wrap parameters under 'opt/target/'.
    if not prefix and 'opt/target/embedding/kernel' in w:
        prefix = 'opt/target/'

    if hasattr(model.patch_embed, 'backbone'):
        # hybrid: a conv backbone (e.g. ResNet stem + stages) precedes patch embedding
        backbone = model.patch_embed.backbone
        stem_only = not hasattr(backbone, 'stem')
        stem = backbone if stem_only else backbone.stem
        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
        if not stem_only:
            for i, stage in enumerate(backbone.stages):
                for j, block in enumerate(stage.blocks):
                    bp = f'{prefix}block{i + 1}/unit{j + 1}/'
                    for r in range(3):
                        getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
                        getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
                        getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
                    if block.downsample is not None:
                        block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
                        block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
                        block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
        embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
    else:
        embed_conv_w = adapt_input_conv(
            model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
    model.patch_embed.proj.weight.copy_(embed_conv_w)
    model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
    model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
    pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
    if pos_embed_w.shape != model.pos_embed.shape:
        # NOTE(review): resize_pos_embed is not defined in this file — presumably
        # timm's helper; confirm it is importable before relying on this branch.
        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights
            pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
    model.pos_embed.copy_(pos_embed_w)
    model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
    model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
    # Classification head and pre-logits are intentionally not loaded here.
    # if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
    #     model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
    #     model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
    # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
    #     model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
    #     model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
    for i, block in enumerate(model.blocks.children()):
        block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
        mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
        # Fuse the separate Flax q/k/v projections into the single qkv linear.
        block.attn.qkv.weight.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
        block.attn.qkv.bias.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
        for r in range(2):
            getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
    """Resize a checkpoint's position embedding to the encoder's patch grid.

    Extra tokens (e.g. CLS) are kept unchanged; only the patch-position
    tokens are bicubically interpolated when the (square) grids differ.
    """
    embedding_size = pos_embed_checkpoint.shape[-1]
    num_patches = visual_encoder.patch_embed.num_patches
    num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
    # Side lengths of the old and new (assumed square) patch grids.
    orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
    new_size = int(num_patches ** 0.5)

    if orig_size == new_size:
        return pos_embed_checkpoint

    extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
    pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
    # (1, L, C) -> (1, C, H, W) for spatial interpolation, then back.
    pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
    pos_tokens = torch.nn.functional.interpolate(
        pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
    pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
    print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2))
    return torch.cat((extra_tokens, pos_tokens), dim=1)
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/transform/randaugment.py
ADDED
|
@@ -0,0 +1,340 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
## aug functions
|
| 6 |
+
def identity_func(img):
    """No-op augmentation: return the input image unchanged."""
    return img
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def autocontrast_func(img, cutoff=0):
    '''
    same output as PIL.ImageOps.autocontrast
    '''
    # Number of intensity bins for an 8-bit channel.
    n_bins = 256

    def tune_channel(ch):
        n = ch.size
        # Number of pixels to ignore at each histogram tail (cutoff is a percent).
        cut = cutoff * n // 100
        if cut == 0:
            high, low = ch.max(), ch.min()
        else:
            hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
            # First bin where the cumulative count exceeds the cutoff, from each end.
            low = np.argwhere(np.cumsum(hist) > cut)
            low = 0 if low.shape[0] == 0 else low[0]
            high = np.argwhere(np.cumsum(hist[::-1]) > cut)
            high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
        if high <= low:
            # Degenerate range: identity mapping.
            table = np.arange(n_bins)
        else:
            # Linearly stretch [low, high] to the full [0, 255] range.
            scale = (n_bins - 1) / (high - low)
            offset = -low * scale
            table = np.arange(n_bins) * scale + offset
            table[table < 0] = 0
            table[table > n_bins - 1] = n_bins - 1
        table = table.clip(0, 255).astype(np.uint8)
        return table[ch]

    # Each channel is stretched independently, as PIL does.
    channels = [tune_channel(ch) for ch in cv2.split(img)]
    out = cv2.merge(channels)
    return out
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def equalize_func(img):
    '''
    same output as PIL.ImageOps.equalize
    PIL's implementation is different from cv2.equalize
    '''
    n_bins = 256

    def tune_channel(ch):
        hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
        non_zero_hist = hist[hist != 0].reshape(-1)
        # PIL's step size: total non-zero mass (minus the last bin) over 255 levels.
        step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
        if step == 0: return ch
        # Offset cumulative histogram by half a step, as PIL does.
        n = np.empty_like(hist)
        n[0] = step // 2
        n[1:] = hist[:-1]
        table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
        return table[ch]

    # Equalize each channel independently.
    channels = [tune_channel(ch) for ch in cv2.split(img)]
    out = cv2.merge(channels)
    return out
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def rotate_func(img, degree, fill=(0, 0, 0)):
    '''
    like PIL, rotate by degree, not radians
    '''
    # Rotate about the image centre; uncovered corners are painted with *fill*.
    H, W = img.shape[0], img.shape[1]
    center = W / 2, H / 2
    M = cv2.getRotationMatrix2D(center, degree, 1)
    out = cv2.warpAffine(img, M, (W, H), borderValue=fill)
    return out
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def solarize_func(img, thresh=128):
    '''
    same output as PIL.ImageOps.solarize: invert all pixel values >= thresh.
    (The original docstring incorrectly said "posterize".)
    '''
    # Build a 256-entry lookup table once, then apply it to the whole image.
    table = np.array([el if el < thresh else 255 - el for el in range(256)])
    table = table.clip(0, 255).astype(np.uint8)
    out = table[img]
    return out
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def color_func(img, factor):
    '''
    same output as PIL.ImageEnhance.Color
    '''
    # Blend between grayscale (factor 0) and the original image (factor 1)
    # with a single 3x3 channel-mixing matrix; the constant column offsets
    # are the BGR luma weights.
    mix = np.float32([
        [0.886, -0.114, -0.114],
        [-0.587, 0.413, -0.587],
        [-0.299, -0.299, 0.701]]) * factor
    mix = mix + np.float32([[0.114], [0.587], [0.299]])
    return np.matmul(img, mix).clip(0, 255).astype(np.uint8)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def contrast_func(img, factor):
    """
    same output as PIL.ImageEnhance.Contrast
    """
    # Luma-weighted (BGR) mean; scaling intensities about this value keeps
    # overall brightness roughly constant.
    mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299]))
    lut = np.array([(lv - mean) * factor + mean for lv in range(256)])
    lut = lut.clip(0, 255).astype(np.uint8)
    return lut[img]
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def brightness_func(img, factor):
    '''
    same output as PIL.ImageEnhance.Brightness: scale every intensity by
    *factor*, clipping to [0, 255]. (The original docstring incorrectly
    said "Contrast".)
    '''
    table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8)
    out = table[img]
    return out
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def sharpness_func(img, factor):
    '''
    The differences between this result and PIL are all on the 4 boundaries;
    the center areas are the same.
    '''
    # 3x3 smoothing kernel (centre-weighted, sums to 13) producing the fully
    # "degenerate" (blurred) image that *factor* blends against.
    kernel = np.ones((3, 3), dtype=np.float32)
    kernel[1][1] = 5
    kernel /= 13
    degenerate = cv2.filter2D(img, -1, kernel)
    if factor == 0.0:
        out = degenerate
    elif factor == 1.0:
        out = img
    else:
        # Blend only the interior; the 1-pixel border keeps original values.
        out = img.astype(np.float32)
        degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :]
        out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate)
        out = out.astype(np.uint8)
    return out
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def shear_x_func(img, factor, fill=(0, 0, 0)):
    # Horizontal shear by *factor*; exposed area is painted with *fill*.
    H, W = img.shape[0], img.shape[1]
    M = np.float32([[1, factor, 0], [0, 1, 0]])
    out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
    return out
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def translate_x_func(img, offset, fill=(0, 0, 0)):
    '''
    same output as PIL.Image.transform
    '''
    # Shift the image left by *offset* pixels (negative tx in the matrix).
    H, W = img.shape[0], img.shape[1]
    M = np.float32([[1, 0, -offset], [0, 1, 0]])
    out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
    return out
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def translate_y_func(img, offset, fill=(0, 0, 0)):
    '''
    same output as PIL.Image.transform
    '''
    # Shift the image up by *offset* pixels (negative ty in the matrix).
    H, W = img.shape[0], img.shape[1]
    M = np.float32([[1, 0, 0], [0, 1, -offset]])
    out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
    return out
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def posterize_func(img, bits):
    '''
    same output as PIL.ImageOps.posterize
    '''
    # Keep only the top *bits* bits of every channel value.
    mask = np.uint8(255 << (8 - bits))
    return img & mask
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def shear_y_func(img, factor, fill=(0, 0, 0)):
    # Vertical shear by *factor*; exposed area is painted with *fill*.
    H, W = img.shape[0], img.shape[1]
    M = np.float32([[1, 0, 0], [factor, 1, 0]])
    out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
    return out
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def cutout_func(img, pad_size, replace=(0, 0, 0)):
    """Blank out a randomly positioned square patch of side ~pad_size.

    The patch is filled with *replace*; the input image is not modified.
    """
    replace = np.array(replace, dtype=np.uint8)
    H, W = img.shape[0], img.shape[1]
    # A single RNG call (two uniforms) picks the patch centre.
    rh, rw = np.random.random(2)
    half = pad_size // 2
    cy, cx = int(rh * H), int(rw * W)
    top, bottom = max(cy - half, 0), min(cy + half, H)
    left, right = max(cx - half, 0), min(cx + half, W)
    out = img.copy()
    out[top:bottom, left:right, :] = replace
    return out
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
### level to args
|
| 208 |
+
def enhance_level_to_args(MAX_LEVEL):
    """Map a level in [0, MAX_LEVEL] to an enhancement factor in [0.1, 1.9]."""
    def level_to_args(level):
        factor = 0.1 + 1.8 * (level / MAX_LEVEL)
        return (factor,)
    return level_to_args
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def shear_level_to_args(MAX_LEVEL, replace_value):
    """Map a level to a shear factor in [-0.3, 0.3] with a random sign."""
    def level_to_args(level):
        magnitude = (level / MAX_LEVEL) * 0.3
        # One uniform draw decides the shear direction.
        if np.random.random() > 0.5:
            magnitude = -magnitude
        return (magnitude, replace_value)

    return level_to_args
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def translate_level_to_args(translate_const, MAX_LEVEL, replace_value):
    """Map a level to a pixel offset in [-translate_const, translate_const],
    with a random sign."""
    def level_to_args(level):
        offset = (level / MAX_LEVEL) * float(translate_const)
        if np.random.random() > 0.5:
            offset = -offset
        return (offset, replace_value)

    return level_to_args
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value):
    """Map a level to an integer cutout patch size."""
    def level_to_args(level):
        size = int((level / MAX_LEVEL) * cutout_const)
        return (size, replace_value)

    return level_to_args
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def solarize_level_to_args(MAX_LEVEL):
    """Map a level to a solarize threshold in [0, 256]."""
    def level_to_args(level):
        threshold = int((level / MAX_LEVEL) * 256)
        return (threshold,)
    return level_to_args
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def none_level_to_args(level):
    """Argument mapper for ops that take no magnitude parameter."""
    return ()
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def posterize_level_to_args(MAX_LEVEL):
    """Map a level to a posterize bit count in [0, 4]."""
    def level_to_args(level):
        bits = int((level / MAX_LEVEL) * 4)
        return (bits,)
    return level_to_args
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def rotate_level_to_args(MAX_LEVEL, replace_value):
    """Map a level to a rotation angle in [-30, 30] degrees, random sign."""
    def level_to_args(level):
        angle = (level / MAX_LEVEL) * 30
        if np.random.random() < 0.5:
            angle = -angle
        return (angle, replace_value)

    return level_to_args
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
# Op name -> augmentation implementation.
func_dict = {
    'Identity': identity_func,
    'AutoContrast': autocontrast_func,
    'Equalize': equalize_func,
    'Rotate': rotate_func,
    'Solarize': solarize_func,
    'Color': color_func,
    'Contrast': contrast_func,
    'Brightness': brightness_func,
    'Sharpness': sharpness_func,
    'ShearX': shear_x_func,
    'TranslateX': translate_x_func,
    'TranslateY': translate_y_func,
    'Posterize': posterize_func,
    'ShearY': shear_y_func,
}

# Defaults for the level -> argument mappings below.
translate_const = 10
MAX_LEVEL = 10
replace_value = (128, 128, 128)  # mid-gray fill for geometric ops

# Op name -> callable turning a magnitude level into the op's extra args.
arg_dict = {
    'Identity': none_level_to_args,
    'AutoContrast': none_level_to_args,
    'Equalize': none_level_to_args,
    'Rotate': rotate_level_to_args(MAX_LEVEL, replace_value),
    'Solarize': solarize_level_to_args(MAX_LEVEL),
    'Color': enhance_level_to_args(MAX_LEVEL),
    'Contrast': enhance_level_to_args(MAX_LEVEL),
    'Brightness': enhance_level_to_args(MAX_LEVEL),
    'Sharpness': enhance_level_to_args(MAX_LEVEL),
    'ShearX': shear_level_to_args(MAX_LEVEL, replace_value),
    'TranslateX': translate_level_to_args(
        translate_const, MAX_LEVEL, replace_value
    ),
    'TranslateY': translate_level_to_args(
        translate_const, MAX_LEVEL, replace_value
    ),
    'Posterize': posterize_level_to_args(MAX_LEVEL),
    'ShearY': shear_level_to_args(MAX_LEVEL, replace_value),
}
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
class RandomAugment(object):
    """RandAugment-style policy: apply N randomly chosen ops at magnitude M.

    Each sampled op is applied with probability 0.5.  Op names must be keys
    of the module-level ``arg_dict`` / ``func_dict`` registries.
    """

    def __init__(self, N=2, M=10, isPIL=False, augs=None):
        # BUG FIX: the original signature used a mutable default (augs=[]),
        # which is shared across all instances.  `None` avoids that; passing
        # an empty list still falls back to the full op set, as before.
        self.N = N          # number of ops sampled per image
        self.M = M          # magnitude level passed to every op
        self.isPIL = isPIL  # convert PIL input to ndarray in __call__
        if augs:
            self.augs = list(augs)
        else:
            self.augs = list(arg_dict.keys())

    def get_random_ops(self):
        """Sample N (name, prob, level) op descriptors; ops may repeat."""
        sampled_ops = np.random.choice(self.augs, self.N)
        return [(op, 0.5, self.M) for op in sampled_ops]

    def __call__(self, img):
        """Apply the sampled ops to `img` (ndarray, or PIL when isPIL)."""
        if self.isPIL:
            img = np.array(img)
        ops = self.get_random_ops()
        for name, prob, level in ops:
            # Each sampled op fires with probability `prob`.
            if np.random.random() > prob:
                continue
            args = arg_dict[name](level)
            img = func_dict[name](img, *args)
        return img
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
if __name__ == '__main__':
    # Smoke test: run the default policy once on a random image array.
    augmenter = RandomAugment()
    sample = np.random.randn(32, 32, 3)
    augmenter(sample)
|
eval_agent/eval_tools/t2i_comp/BLIPvqa_eval/utils.py
ADDED
|
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
    """Set every param group's lr on a cosine decay from init_lr to min_lr."""
    cos_factor = 0.5 * (1. + math.cos(math.pi * epoch / max_epoch))
    lr = min_lr + (init_lr - min_lr) * cos_factor
    for group in optimizer.param_groups:
        group['lr'] = lr
|
| 7 |
+
|
| 8 |
+
def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
    """Linearly ramp the lr from init_lr to max_lr over max_step steps."""
    ramp = init_lr + (max_lr - init_lr) * step / max_step
    lr = min(max_lr, ramp)
    for group in optimizer.param_groups:
        group['lr'] = lr
|
| 13 |
+
|
| 14 |
+
def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate):
    """Exponentially decay the lr by decay_rate per epoch, floored at min_lr."""
    lr = max(min_lr, init_lr * decay_rate ** epoch)
    for group in optimizer.param_groups:
        group['lr'] = lr
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
import io
|
| 22 |
+
import os
|
| 23 |
+
import time
|
| 24 |
+
from collections import defaultdict, deque
|
| 25 |
+
import datetime
|
| 26 |
+
|
| 27 |
+
import torch
|
| 28 |
+
import torch.distributed as dist
|
| 29 |
+
|
| 30 |
+
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        # Default display: windowed median plus global average.
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)  # sliding window of recent values
        self.total = 0.0  # running sum over ALL updates (not just the window)
        self.count = 0    # number of samples folded into `total`
        self.fmt = fmt

    def update(self, value, n=1):
        # `n` weights the value in the global average (e.g. batch size).
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        # All-reduce count/total across ranks so global_avg agrees everywhere.
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        # Median over the sliding window only.
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        # Mean over the sliding window only.
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        # Mean over every update, weighted by `n`.
        return self.total / self.count

    @property
    def max(self):
        # Max over the sliding window.
        return max(self.deque)

    @property
    def value(self):
        # Most recent raw sample.
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class MetricLogger(object):
    """Collect named SmoothedValue meters and pretty-print training progress."""

    def __init__(self, delimiter="\t"):
        # Unknown meter names are created lazily with default smoothing.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        # Accepts floats/ints or 0-dim tensors; tensors are unwrapped first.
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Allow `logger.loss` as a shortcut for `logger.meters['loss']`.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        # Windowed view of each meter, using SmoothedValue's own format.
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def global_avg(self):
        # Global-average view of each meter (over all updates, not the window).
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {:.4f}".format(name, meter.global_avg)
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        # Reduce every meter's count/total across distributed ranks.
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        # Register a meter with a custom window/format ahead of time.
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, printing progress every `print_freq` steps.

        Tracks per-iteration data-loading time and total step time, an ETA
        estimate, and (when CUDA is available) peak GPU memory.
        """
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the width of len(iterable).
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # Time spent waiting on the data source before this yield.
            data_time.update(time.time() - end)
            yield obj
            # Time for the full step (data wait + caller's work after yield).
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class AttrDict(dict):
    """Dict whose keys are also readable/writable as attributes."""

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # Aliasing __dict__ to the mapping itself makes d.key <-> d['key'].
        self.__dict__ = self
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def compute_acc(logits, label, reduction='mean'):
    """Classification accuracy of argmax(logits) against integer labels.

    Args:
        logits: (batch, num_classes) score tensor.
        label: (batch,) target class indices.
        reduction: 'mean' -> Python float; 'none' -> per-sample float tensor.

    Raises:
        ValueError: on an unsupported reduction.  (The original fell off the
        end and implicitly returned None for unknown values.)
    """
    ret = (torch.argmax(logits, dim=1) == label).float()
    if reduction == 'none':
        return ret.detach()
    if reduction == 'mean':
        return ret.mean().item()
    raise ValueError("unsupported reduction: {!r}".format(reduction))
|
| 194 |
+
|
| 195 |
+
def compute_n_params(model, return_str=True):
    """Count the total number of parameter elements in `model`.

    Args:
        model: any torch.nn.Module.
        return_str: when True return a human-readable string ('1.2M'/'3.4K'),
            otherwise the raw int.
    """
    # Idiom: Tensor.numel() replaces the original hand-rolled product over
    # p.shape; the result is identical.
    tot = sum(p.numel() for p in model.parameters())
    if return_str:
        if tot >= 1e6:
            return '{:.1f}M'.format(tot / 1e6)
        else:
            return '{:.1f}K'.format(tot / 1e3)
    else:
        return tot
|
| 209 |
+
|
| 210 |
+
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # Passing force=True lets non-master ranks print anyway.
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    # NOTE: this monkey-patches builtins.print process-wide; every module's
    # print() is silenced on non-master ranks after this call.
    __builtin__.print = print
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def is_dist_avail_and_initialized():
    """True iff torch.distributed is both compiled in and initialized."""
    return dist.is_available() and dist.is_initialized()
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
def get_world_size():
    """Number of distributed processes, or 1 when not running distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_world_size()
    return 1
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def get_rank():
    """This process's rank, or 0 when not running distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_rank()
    return 0
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def is_main_process():
    """True on rank 0 (and in single-process runs)."""
    return get_rank() == 0
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def save_on_master(*args, **kwargs):
    """torch.save, restricted to the main process to avoid write races."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def init_distributed_mode(args):
    """Initialize torch.distributed from env vars, mutating `args` in place.

    Sets args.rank / args.world_size / args.gpu / args.distributed /
    args.dist_backend; expects args.dist_url to be provided by the caller.
    """
    # Guard: skip if distributed is unavailable or already initialized.
    if not dist.is_available():
        return
    if dist.is_initialized():
        return

    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # torchrun / torch.distributed.launch style environment.
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        # SLURM launcher: derive local GPU from global rank.
        # NOTE(review): this branch never sets args.world_size, yet it is
        # used in the print/init_process_group below -- the caller must have
        # set it beforehand or this path raises AttributeError; confirm.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}, word {}): {}'.format(
        args.rank, args.world_size, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    # Barrier so all ranks finish init before setup_for_distributed mutes
    # print() on non-master ranks.
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
|
| 283 |
+
|
| 284 |
+
|
eval_agent/eval_tools/t2i_comp/CLIPScore_eval/CLIP_similarity.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# image and text similarity
|
| 2 |
+
# ref https://github.com/openai/CLIP
|
| 3 |
+
import os
|
| 4 |
+
import torch
|
| 5 |
+
import clip
|
| 6 |
+
from PIL import Image
|
| 7 |
+
import spacy
|
| 8 |
+
nlp=spacy.load('en_core_web_sm')
|
| 9 |
+
|
| 10 |
+
import json
|
| 11 |
+
import argparse
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 16 |
+
model, preprocess = clip.load("ViT-B/32", device=device)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def parse_args():
    """CLI for CLIP image-text similarity scoring.

    --outpath: directory containing a 'samples' sub-folder to score.
    --complex: flag; when set, adjectives are stripped from prompts
        before tokenization (for the 'complex' category).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--outpath",
        type=str,
        default=None,
        required=True,
        help="Path to read samples and output scores"
    )
    # BUG FIX: the original used `type=bool`, which argparse applies as
    # bool(string) -- ANY non-empty value, including "False", parsed as
    # True.  `store_true` gives the intended off-by-default boolean flag
    # while args.complex keeps the same type and default.
    parser.add_argument(
        "--complex",
        action="store_true",
        default=False,
        help="To evaluate on samples in complex category or not"
    )
    args = parser.parse_args()
    return args
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def main():
    """Score every image in <outpath>/samples against the prompt embedded in
    its filename with CLIP, then write per-image scores and the average.

    Uses the module-level `model`, `preprocess`, `nlp`, and `device` globals.
    Filenames are assumed to look like "<prompt>_<index>.<ext>".
    """
    args = parse_args()

    outpath = args.outpath

    image_folder = os.path.join(outpath, 'samples')
    file_names = os.listdir(image_folder)
    # Order images by the trailing numeric index in the filename.
    file_names.sort(key=lambda x: int(x.split("_")[-1].split('.')[0]))  # sort

    cnt = 0
    total = []

    # output annotation.json
    for file_name in file_names:

        image_path = os.path.join(image_folder, file_name)
        image = preprocess(Image.open(image_path)).unsqueeze(0).to(device)
        # Prompt is everything before the first underscore.
        prompt = file_name.split("_")[0]

        if (args.complex):
            # For the 'complex' category, drop adjectives so the score is not
            # dominated by attribute words.
            doc = nlp(prompt)
            prompt_without_adj = ' '.join([token.text for token in doc if token.pos_ != 'ADJ'])  # remove adj
            text = clip.tokenize(prompt_without_adj).to(device)
        else:
            text = clip.tokenize(prompt).to(device)

        with torch.no_grad():
            image_features = model.encode_image(image.to(device))
            image_features /= image_features.norm(dim=-1, keepdim=True)

            text_features = model.encode_text(text)
            text_features /= text_features.norm(dim=-1, keepdim=True)

            # Calculate the cosine similarity between the image and text features
            cosine_similarity = (image_features @ text_features.T).squeeze().item()

        similarity = cosine_similarity
        cnt += 1
        if (cnt % 100 == 0):
            print(f"CLIP image-text:{cnt} prompt(s) have been processed!")
        total.append(similarity)

    # save per-image scores in BLIP-VQA-compatible format
    sim_dict = []
    for i in range(len(total)):
        tmp = {}
        tmp['question_id'] = i
        tmp["answer"] = total[i]
        sim_dict.append(tmp)

    json_file = json.dumps(sim_dict)
    savepath = os.path.join(outpath, "annotation_clip")  # todo
    os.makedirs(savepath, exist_ok=True)
    with open(f'{savepath}/vqa_result.json', 'w') as f:
        f.write(json_file)
    print(f"save to {savepath}")

    # score avg
    score = 0
    for i in range(len(sim_dict)):
        score += float(sim_dict[i]['answer'])
    with open(f'{savepath}/score_avg.txt', 'w') as f:
        f.write('score avg:' + str(score / len(sim_dict)))
    print("score avg:", score / len(sim_dict))
|
| 112 |
+
|
| 113 |
+
# Script entry point: compute CLIP similarity scores for a sample folder.
if __name__ == "__main__":
    main()
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
|
eval_agent/eval_tools/t2i_comp/CLIPScore_eval/CLIP_similarity_eval_agent.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# image and text similarity
|
| 2 |
+
# ref https://github.com/openai/CLIP
|
| 3 |
+
import os
|
| 4 |
+
import torch
|
| 5 |
+
import clip
|
| 6 |
+
from PIL import Image
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def clipscore(model, preprocess, image_pairs, device):
    """Compute CLIP image-text cosine similarity for each (image, prompt) pair.

    Args:
        model: a loaded CLIP model (exposes encode_image / encode_text).
        preprocess: the matching CLIP image preprocessing transform.
        image_pairs: iterable of dicts with keys 'content_path' (image file
            path) and 'prompt' (text to score against).
        device: torch device string used for inference.

    Returns:
        {'score': [avg_score, results]} where results is a list of
        {'prompt', 'image_path', 'image_results'} dicts, one per pair.
    """
    total = []    # raw similarity per pair, for the average
    results = []  # per-pair records for reporting

    for info in image_pairs:
        image_path = info["content_path"]
        prompt = info["prompt"]

        image = preprocess(Image.open(image_path)).unsqueeze(0).to(device)
        text = clip.tokenize(prompt).to(device)
        with torch.no_grad():
            image_features = model.encode_image(image.to(device))
            image_features /= image_features.norm(dim=-1, keepdim=True)

            text_features = model.encode_text(text)
            text_features /= text_features.norm(dim=-1, keepdim=True)

            # Calculate the cosine similarity between the image and text features
            cosine_similarity = (image_features @ text_features.T).squeeze().item()

        similarity = cosine_similarity
        results.append({'prompt': prompt, 'image_path': image_path, 'image_results': similarity})
        total.append(similarity)

    avg_score = np.mean(total)
    return {
        "score": [avg_score, results]
    }
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def calculate_clip_score(image_pairs):
    """Load a ViT-B/32 CLIP model and score the given image/prompt pairs.

    Returns the dict produced by `clipscore`: average score plus per-image
    results.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, preprocess = clip.load("ViT-B/32", device=device)
    return clipscore(model, preprocess, image_pairs, device)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
|
eval_agent/eval_tools/t2i_comp/CLIPScore_eval/clip/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .clip import *
|