Commit ·
bbd5d13
0
Parent(s):
Initial clean SEMA Space package
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .dockerignore +15 -0
- .gitattributes +3 -0
- Dockerfile +25 -0
- RAG/Knowledge_Database/AI_dbmanager.py +750 -0
- RAG/Knowledge_Database/AIdbconfig.py +32 -0
- RAG/Knowledge_Database/RAGFunc.py +1511 -0
- RAG/Knowledge_Database/languagebind_main/1/1 +1 -0
- RAG/Knowledge_Database/languagebind_main/DATASETS.md +66 -0
- RAG/Knowledge_Database/languagebind_main/DATASET_LICENSE +400 -0
- RAG/Knowledge_Database/languagebind_main/LICENSE +21 -0
- RAG/Knowledge_Database/languagebind_main/README.md +422 -0
- RAG/Knowledge_Database/languagebind_main/TRAIN_AND_VALIDATE.md +214 -0
- RAG/Knowledge_Database/languagebind_main/a_cls/class_labels_indices.csv +528 -0
- RAG/Knowledge_Database/languagebind_main/a_cls/dataloader.py +100 -0
- RAG/Knowledge_Database/languagebind_main/a_cls/datasets.py +93 -0
- RAG/Knowledge_Database/languagebind_main/a_cls/filter_eval_audio.py +21 -0
- RAG/Knowledge_Database/languagebind_main/a_cls/precision.py +12 -0
- RAG/Knowledge_Database/languagebind_main/a_cls/stats.py +57 -0
- RAG/Knowledge_Database/languagebind_main/a_cls/util.py +306 -0
- RAG/Knowledge_Database/languagebind_main/a_cls/zero_shot.py +234 -0
- RAG/Knowledge_Database/languagebind_main/a_cls/zero_shot_classifier.py +111 -0
- RAG/Knowledge_Database/languagebind_main/a_cls/zero_shot_metadata.py +184 -0
- RAG/Knowledge_Database/languagebind_main/a_cls/zeroshot_cls.py +46 -0
- RAG/Knowledge_Database/languagebind_main/al_ret/data_dataloaders.py +28 -0
- RAG/Knowledge_Database/languagebind_main/al_ret/dataloader_msrvtt_retrieval.py +114 -0
- RAG/Knowledge_Database/languagebind_main/al_ret/datasets.py +137 -0
- RAG/Knowledge_Database/languagebind_main/al_ret/metrics.py +70 -0
- RAG/Knowledge_Database/languagebind_main/al_ret/precision.py +12 -0
- RAG/Knowledge_Database/languagebind_main/al_ret/retrieval.py +266 -0
- RAG/Knowledge_Database/languagebind_main/al_ret/util.py +73 -0
- RAG/Knowledge_Database/languagebind_main/al_ret/zero_shot.py +91 -0
- RAG/Knowledge_Database/languagebind_main/d_cls/cp_zero_shot_metadata.py +117 -0
- RAG/Knowledge_Database/languagebind_main/d_cls/datasets.py +20 -0
- RAG/Knowledge_Database/languagebind_main/d_cls/precision.py +12 -0
- RAG/Knowledge_Database/languagebind_main/d_cls/zero_shot.py +90 -0
- RAG/Knowledge_Database/languagebind_main/d_cls/zero_shot_classifier.py +111 -0
- RAG/Knowledge_Database/languagebind_main/d_cls/zero_shot_metadata.py +117 -0
- RAG/Knowledge_Database/languagebind_main/d_cls/zeroshot_cls.py +47 -0
- RAG/Knowledge_Database/languagebind_main/data/base_datasets.py +215 -0
- RAG/Knowledge_Database/languagebind_main/data/bpe_simple_vocab_16e6.txt.gz +3 -0
- RAG/Knowledge_Database/languagebind_main/data/build_datasets.py +247 -0
- RAG/Knowledge_Database/languagebind_main/data/new_loadvat.py +498 -0
- RAG/Knowledge_Database/languagebind_main/data/process_audio.py +118 -0
- RAG/Knowledge_Database/languagebind_main/data/process_depth.py +55 -0
- RAG/Knowledge_Database/languagebind_main/data/process_image.py +25 -0
- RAG/Knowledge_Database/languagebind_main/data/process_text.py +202 -0
- RAG/Knowledge_Database/languagebind_main/data/process_thermal.py +26 -0
- RAG/Knowledge_Database/languagebind_main/data/process_video.py +161 -0
- RAG/Knowledge_Database/languagebind_main/gradio_app.py +219 -0
- RAG/Knowledge_Database/languagebind_main/i_cls/datasets.py +31 -0
.dockerignore
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.git
|
| 2 |
+
.idea
|
| 3 |
+
__pycache__
|
| 4 |
+
*.pyc
|
| 5 |
+
*.pyo
|
| 6 |
+
*.pyd
|
| 7 |
+
.pytest_cache
|
| 8 |
+
.mypy_cache
|
| 9 |
+
.venv
|
| 10 |
+
venv
|
| 11 |
+
dataset
|
| 12 |
+
evaluation/results
|
| 13 |
+
evaluation/eval_db
|
| 14 |
+
output_keyframes
|
| 15 |
+
webapp/.hf_space_build
|
.gitattributes
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.db filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
RAG/Knowledge_Database/languagebind_main/data/bpe_simple_vocab_16e6.txt.gz filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
Dockerfile
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.10-slim

# Avoid .pyc files and buffered (delayed) log output inside the container.
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
# Point model/cache dirs at /tmp so the app dir can stay read-only at runtime.
ENV HF_HOME=/tmp/huggingface
ENV TRANSFORMERS_CACHE=/tmp/huggingface
ENV SEMA_TMP_DIR=/tmp

WORKDIR /app

# ffmpeg for audio/video decoding; libgl1 + libglib2.0-0 are OpenCV runtime deps.
RUN apt-get update && apt-get install -y --no-install-recommends \
    ffmpeg \
    libgl1 \
    libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

# Install dependencies before copying sources so the layer cache survives code edits.
COPY requirements.txt /app/requirements.txt
COPY webapp/requirements.txt /app/webapp-requirements.txt
RUN pip install --no-cache-dir -r /app/requirements.txt -r /app/webapp-requirements.txt

COPY . /app

EXPOSE 7860

CMD ["uvicorn", "webapp.backend.app:app", "--host", "0.0.0.0", "--port", "7860"]
|
RAG/Knowledge_Database/AI_dbmanager.py
ADDED
|
@@ -0,0 +1,750 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
@filename: AI_dbmanager.py
|
| 3 |
+
@description: AI Database Operations Class
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
|
| 9 |
+
REPO_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
| 10 |
+
if REPO_ROOT not in sys.path:
|
| 11 |
+
sys.path.insert(0, REPO_ROOT)
|
| 12 |
+
|
| 13 |
+
# absolute_project_root = r"D:\Pythonworks\SpatialTemporalAttentionGCN-master\SpatialTemporalAttentionGCN-master"
|
| 14 |
+
#
|
| 15 |
+
# project_root = os.path.abspath(absolute_project_root)
|
| 16 |
+
#
|
| 17 |
+
# if project_root not in sys.path:
|
| 18 |
+
# sys.path.insert(0, project_root)
|
| 19 |
+
|
| 20 |
+
from sqlalchemy import Column, Integer, String, Text, LargeBinary, Float, ForeignKey, MetaData, create_engine, inspect, text
|
| 21 |
+
from sqlalchemy.orm import declarative_base, relationship, sessionmaker
|
| 22 |
+
from openai import OpenAI
|
| 23 |
+
try:
|
| 24 |
+
from deep_translator import DeeplTranslator
|
| 25 |
+
except ImportError:
|
| 26 |
+
DeeplTranslator = None
|
| 27 |
+
import pickle
|
| 28 |
+
from RAG.Knowledge_Database.AIdbconfig import engine, session, db_pure_path
|
| 29 |
+
import time
|
| 30 |
+
import numpy as np
|
| 31 |
+
import re
|
| 32 |
+
|
| 33 |
+
rag_split_and_merge = None
|
| 34 |
+
_rag_get_embedding_languagebind_video = None
|
| 35 |
+
_rag_get_embedding_languagebind_text = None
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def _load_ragfunc_helpers():
    """Lazily import the heavy RAGFunc helpers and cache them in module globals.

    Subsequent calls are no-ops once all three helpers are populated.
    """
    global rag_split_and_merge
    global _rag_get_embedding_languagebind_video
    global _rag_get_embedding_languagebind_text

    already_loaded = (
        rag_split_and_merge is not None
        and _rag_get_embedding_languagebind_video is not None
        and _rag_get_embedding_languagebind_text is not None
    )
    if already_loaded:
        return

    # Imported here (not at module top) because RAGFunc pulls in heavy deps.
    from RAG.Knowledge_Database.RAGFunc import (
        split_and_merge as imported_split_and_merge,
        get_embedding_languagebind_video as imported_get_embedding_languagebind_video,
        get_embedding_languagebind_text as imported_get_embedding_languagebind_text,
    )

    rag_split_and_merge = imported_split_and_merge
    _rag_get_embedding_languagebind_video = imported_get_embedding_languagebind_video
    _rag_get_embedding_languagebind_text = imported_get_embedding_languagebind_text
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def get_embedding(texts, model_name="text-embedding-v4"):
    """Embed `texts` via the DashScope OpenAI-compatible embeddings endpoint.

    Args:
        texts: A string or list of strings to embed.
        model_name: DashScope embedding model identifier.

    Returns:
        One 1024-dimensional float32 numpy vector per input text.
    """
    # Reads the API key from the ALI_API_KEY environment variable.
    client = OpenAI(
        api_key=os.getenv("ALI_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    response = client.embeddings.create(
        model=model_name,
        input=texts,
        dimensions=1024,
        encoding_format="float",
    )
    vectors = []
    for item in response.data:
        vectors.append(np.array(item.embedding, dtype=np.float32))
    return vectors
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def get_embedding_languagebind_video(video_path):
    """Embed a video file via the lazily-imported LanguageBind helper from RAGFunc."""
    _load_ragfunc_helpers()
    return _rag_get_embedding_languagebind_video(video_path)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def get_embedding_languagebind_text(texts):
    """Embed text via the lazily-imported LanguageBind helper from RAGFunc."""
    _load_ragfunc_helpers()
    return _rag_get_embedding_languagebind_text(texts)
|
| 83 |
+
|
| 84 |
+
Base = declarative_base()
|
| 85 |
+
|
| 86 |
+
class Document(Base):
    """Document table: one row per knowledge document (title + full text)."""
    __tablename__ = "documents"

    id = Column(Integer, primary_key=True, autoincrement=True)
    title = Column(String(200))
    content = Column(Text)
    # Pickled float32 numpy vector of the title (see KnowledgeDB._vector_to_bytes);
    # NULL until update_embeddings has run.
    title_embedding = Column(LargeBinary)

    # One-to-many: a document owns the chunks split from its content.
    chunks = relationship("Chunk", back_populates="document")
|
| 96 |
+
|
| 97 |
+
class Chunk(Base):
    """Document chunk table: retrieval-sized pieces of a document's content."""
    __tablename__ = "chunks"

    id = Column(Integer, primary_key=True, autoincrement=True)
    document_id = Column(Integer, ForeignKey("documents.id"))
    text = Column(Text)

    embedding = Column(LargeBinary)  # Store embedding vector (serialized into BLOB)

    document = relationship("Document", back_populates="chunks")
|
| 108 |
+
|
| 109 |
+
class Embedding(Base):
    """Embedding table for storing text vector representations"""
    __tablename__ = "embeddings"

    id = Column(Integer, primary_key=True, autoincrement=True)
    chunk_id = Column(Integer, ForeignKey("chunks.id"))
    vector = Column(LargeBinary)  # Store serialized vector
    model_name = Column(String(100))  # Record the embedding model used
    created_at = Column(Float)  # Record creation time (newest wins when deduplicating per chunk)

    chunk = relationship("Chunk", back_populates="embeddings")
|
| 120 |
+
|
| 121 |
+
Chunk.embeddings = relationship("Embedding", back_populates="chunk")
|
| 122 |
+
|
| 123 |
+
SECTION_TITLE_RE = re.compile(r"(?m)^//\s*(.+?)\s*$")
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def ensure_database_schema(db_engine):
    """Create missing tables and add the title embedding column for legacy DBs."""
    Base.metadata.create_all(db_engine)

    inspector = inspect(db_engine)
    if "documents" not in set(inspector.get_table_names()):
        return

    existing_columns = {column["name"] for column in inspector.get_columns("documents")}
    if "title_embedding" in existing_columns:
        return

    # Legacy databases predate title_embedding; add the column in place.
    with db_engine.begin() as conn:
        conn.execute(text("ALTER TABLE documents ADD COLUMN title_embedding BLOB"))
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def split_and_merge(text, chunk_size=500):
    """Use the repo splitter when available; otherwise fall back to a lightweight local splitter."""
    # Prefer the RAGFunc implementation; a failed lazy import falls through silently.
    if rag_split_and_merge is None:
        try:
            _load_ragfunc_helpers()
        except Exception:
            pass
    if rag_split_and_merge is not None:
        return rag_split_and_merge(text, chunk_size=chunk_size)

    cleaned = str(text or "").replace("\r\n", "\n").strip()
    if not cleaned:
        return []

    # Sentence boundary: CJK or ASCII sentence punctuation (same pattern as original).
    sentence_pattern = re.compile(r"[^。!?;!?;\n]+[。!?;!?;]?")
    merged_chunks = []

    for paragraph in (p.strip() for p in cleaned.split("\n\n") if p.strip()):
        sentences = [s.strip() for s in sentence_pattern.findall(paragraph) if s.strip()]
        if not sentences:
            sentences = [paragraph]

        # Greedily pack consecutive sentences up to chunk_size characters.
        buffer = ""
        for sentence in sentences:
            if len(buffer) + len(sentence) <= chunk_size:
                buffer += sentence
            else:
                if buffer:
                    merged_chunks.append(buffer.strip())
                buffer = sentence
        if buffer:
            merged_chunks.append(buffer.strip())

    return merged_chunks
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def parse_structured_knowledge_sections(content):
    """Parse `//title` knowledge blocks from zhishi-style text.

    Returns a list of (title, body) tuples; entries whose title or body is
    empty after stripping are dropped.
    """
    normalized = str(content or "").replace("\r\n", "\n")
    # Same pattern as the module-level SECTION_TITLE_RE: one `//` heading per line.
    heading_re = re.compile(r"(?m)^//\s*(.+?)\s*$")
    headings = list(heading_re.finditer(normalized))
    if not headings:
        return []

    parsed = []
    for position, heading in enumerate(headings):
        next_start = (
            headings[position + 1].start()
            if position + 1 < len(headings)
            else len(normalized)
        )
        section_title = heading.group(1).strip()
        section_body = normalized[heading.end():next_start].strip()
        if section_title and section_body:
            parsed.append((section_title, section_body))
    return parsed
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def build_section_chunks(title, body, chunk_size):
    """Split one knowledge section into plain text chunks without duplicating the title.

    Returns [] when either the title or the body is empty after stripping.
    """
    clean_title = str(title or "").strip()
    clean_body = str(body or "").strip()
    if not (clean_title and clean_body):
        return []

    pieces = split_and_merge(clean_body, chunk_size=chunk_size)
    if not pieces:
        # Splitter produced nothing: keep the whole body as a single chunk.
        pieces = [clean_body]

    # Drop pieces that are empty once normalized.
    return [c for c in (str(piece or "").strip() for piece in pieces) if c]
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def build_document_chunks(title, content, chunk_size):
    """Chunk structured KB text per `//title` block before sentence chunking."""
    sections = parse_structured_knowledge_sections(content)
    if not sections:
        # Unstructured content: treat the whole document as one section.
        return build_section_chunks(title=title, body=content, chunk_size=chunk_size)

    all_chunks = []
    for section_title, section_body in sections:
        all_chunks += build_section_chunks(title=section_title, body=section_body, chunk_size=chunk_size)
    return all_chunks
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def build_pure_knowledge_db(zhishi_path, db_path, chunk_size=260, rebuild=True):
    """Build the pure knowledge DB from zhishi.txt using one document per knowledge block.

    Args:
        zhishi_path: Source text file containing `//title` sections.
        db_path: Target SQLite file; parent directory is created as needed.
        chunk_size: Max characters per chunk passed to the splitter.
        rebuild: When True, start from an empty database.

    Returns:
        Summary dict with paths and section/document/chunk counts.

    Raises:
        FileNotFoundError: If `zhishi_path` does not exist.
        ValueError: If no `//title` sections are found in the source file.
    """
    zhishi_path = os.path.abspath(zhishi_path)
    db_path = os.path.abspath(db_path)

    if not os.path.exists(zhishi_path):
        raise FileNotFoundError(f"Knowledge source file not found: {zhishi_path}")

    os.makedirs(os.path.dirname(db_path), exist_ok=True)

    pure_engine = create_engine(f"sqlite:///{db_path}", echo=False)
    pure_session_factory = sessionmaker(bind=pure_engine)
    ensure_database_schema(pure_engine)
    pure_session = pure_session_factory()

    if rebuild and os.path.exists(db_path):
        try:
            # Preferred rebuild path: remove the DB file and recreate a fresh schema.
            pure_session.close()
            pure_engine.dispose()
            os.remove(db_path)
            pure_engine = create_engine(f"sqlite:///{db_path}", echo=False)
            pure_session_factory = sessionmaker(bind=pure_engine)
            ensure_database_schema(pure_engine)
            pure_session = pure_session_factory()
        except PermissionError:
            # File could not be removed (e.g. still held open on Windows — TODO confirm);
            # fall back to emptying the tables in place instead.
            pure_engine = create_engine(f"sqlite:///{db_path}", echo=False)
            pure_session_factory = sessionmaker(bind=pure_engine)
            pure_session = pure_session_factory()
            ensure_database_schema(pure_engine)
            pure_session.query(Embedding).delete()
            pure_session.query(Chunk).delete()
            pure_session.query(Document).delete()
            pure_session.commit()

    with open(zhishi_path, "r", encoding="utf-8") as f:
        raw_text = f.read()

    sections = parse_structured_knowledge_sections(raw_text)
    if not sections:
        raise ValueError(f"No structured `//title` sections found in {zhishi_path}")

    document_count = 0
    chunk_count = 0

    try:
        for section_title, section_body in sections:
            # One Document row per `//title` block.
            document = Document(title=section_title, content=section_body)
            pure_session.add(document)
            pure_session.flush()  # assigns document.id for the chunk foreign keys

            section_chunks = build_section_chunks(
                title=section_title,
                body=section_body,
                chunk_size=chunk_size,
            )
            for chunk_text in section_chunks:
                pure_session.add(
                    Chunk(
                        document_id=document.id,
                        text=chunk_text,
                    )
                )

            document_count += 1
            chunk_count += len(section_chunks)

        pure_session.commit()
        # NOTE: embeddings are not generated here, so "embeddings" is always 0.
        return {
            "db_path": db_path,
            "zhishi_path": zhishi_path,
            "sections": len(sections),
            "documents": document_count,
            "chunks": chunk_count,
            "embeddings": 0,
            "chunk_size": chunk_size,
        }
    except Exception:
        pure_session.rollback()
        raise
    finally:
        pure_session.close()
        pure_engine.dispose()
|
| 310 |
+
|
| 311 |
+
def batch_translate_files(input_folder, api_key, target_lang='en'):
    """Batch translate .txt files in `input_folder` using the DeepL API.

    Translated copies are written to `<input_folder>/translated_results/translated_<name>`.

    Args:
        input_folder: Directory scanned (non-recursively) for .txt files.
        api_key: DeepL API key (the free-tier endpoint is used).
        target_lang: DeepL target language code; source is fixed to Chinese.

    Raises:
        ImportError: If deep_translator is not installed.
    """
    if DeeplTranslator is None:
        raise ImportError("deep_translator is required for batch_translate_files but is not installed.")

    translator = DeeplTranslator(api_key=api_key, source="zh", target=target_lang, use_free_api=True)

    output_folder = os.path.join(input_folder, "translated_results")
    os.makedirs(output_folder, exist_ok=True)

    for filename in os.listdir(input_folder):
        if not filename.endswith(".txt"):
            continue
        file_path = os.path.join(input_folder, filename)

        # BUGFIX: the progress/output messages printed a literal "(unknown)"
        # placeholder instead of interpolating the file name.
        print(f"Processing: {filename}...")

        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read().replace('\n', ' ').strip()

            if not content:
                continue

            translated_text = translator.translate(content)

            output_name = f"translated_{filename}"
            output_path = os.path.join(output_folder, output_name)
            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(translated_text)

            print(f"Successfully exported: {output_name}")

        except Exception as e:
            print(f"Failed to process {filename}: {str(e)}")
|
| 345 |
+
|
| 346 |
+
class KnowledgeDB:
|
| 347 |
+
def __init__(self, session):
|
| 348 |
+
self.engine = session.bind if getattr(session, "bind", None) is not None else engine
|
| 349 |
+
self.session = session
|
| 350 |
+
self._ensure_database_schema()
|
| 351 |
+
|
| 352 |
+
    def _ensure_database_schema(self):
        """Create missing tables and add title embedding column for legacy DBs.

        Delegates to the module-level `ensure_database_schema` helper.
        """
        ensure_database_schema(self.engine)
|
| 355 |
+
|
| 356 |
+
@staticmethod
|
| 357 |
+
def _vector_to_bytes(vector):
|
| 358 |
+
return pickle.dumps(np.asarray(vector, dtype=np.float32))
|
| 359 |
+
|
| 360 |
+
@staticmethod
|
| 361 |
+
def _vector_from_bytes(blob):
|
| 362 |
+
return np.asarray(pickle.loads(blob), dtype=np.float32).flatten()
|
| 363 |
+
|
| 364 |
+
@staticmethod
|
| 365 |
+
def _cosine_similarity(vec, query_vec):
|
| 366 |
+
vec = np.asarray(vec, dtype=np.float32).flatten()
|
| 367 |
+
query_vec = np.asarray(query_vec, dtype=np.float32).flatten()
|
| 368 |
+
denom = np.linalg.norm(vec) * np.linalg.norm(query_vec)
|
| 369 |
+
if denom <= 0:
|
| 370 |
+
return -1.0
|
| 371 |
+
return float(np.dot(vec, query_vec) / denom)
|
| 372 |
+
|
| 373 |
+
def _get_query_vector(self, query, embed_fn):
|
| 374 |
+
query_text = str(query or "").strip()
|
| 375 |
+
if not query_text:
|
| 376 |
+
raise ValueError("query must not be empty")
|
| 377 |
+
return np.asarray(embed_fn([query_text])[0], dtype=np.float32).flatten()
|
| 378 |
+
|
| 379 |
+
    def _get_chunk_embeddings_map(self, model_name="default", document_ids=None):
        """Return {chunk_id: (Chunk, Embedding)} keeping only the newest embedding per chunk.

        Args:
            model_name: Only embeddings produced by this model are considered.
            document_ids: Optional iterable of document ids to restrict the scan;
                an empty iterable short-circuits to an empty dict.
        """
        query = self.session.query(Chunk, Embedding).join(
            Embedding, Embedding.chunk_id == Chunk.id
        ).filter(Embedding.model_name == model_name)

        if document_ids is not None:
            doc_ids = [int(doc_id) for doc_id in document_ids]
            if not doc_ids:
                return {}
            query = query.filter(Chunk.document_id.in_(doc_ids))

        embeddings_map = {}
        for chunk, embedding in query.all():
            existing = embeddings_map.get(chunk.id)
            # Keep the most recently created embedding for each chunk.
            if existing is None or embedding.created_at >= existing[1].created_at:
                embeddings_map[chunk.id] = (chunk, embedding)
        return embeddings_map
|
| 396 |
+
|
| 397 |
+
def add_document(self, title, content, embed_fn=None, model_name="default", chunk_size=260):
|
| 398 |
+
"""Add document and split into chunks"""
|
| 399 |
+
doc = Document(title=title, content=content)
|
| 400 |
+
self.session.add(doc)
|
| 401 |
+
self.session.commit()
|
| 402 |
+
|
| 403 |
+
chunks = build_document_chunks(title=title, content=content, chunk_size=chunk_size)
|
| 404 |
+
|
| 405 |
+
for text in chunks:
|
| 406 |
+
chunk = Chunk(
|
| 407 |
+
document_id=doc.id,
|
| 408 |
+
text=text
|
| 409 |
+
)
|
| 410 |
+
self.session.add(chunk)
|
| 411 |
+
|
| 412 |
+
self.session.commit()
|
| 413 |
+
return doc
|
| 414 |
+
|
| 415 |
+
def clear_chunks(self):
|
| 416 |
+
"""Clear all chunks and related embeddings"""
|
| 417 |
+
try:
|
| 418 |
+
self.session.query(Embedding).delete()
|
| 419 |
+
self.session.query(Chunk).delete()
|
| 420 |
+
self.session.commit()
|
| 421 |
+
print("All chunks and embeddings cleared")
|
| 422 |
+
except Exception as e:
|
| 423 |
+
self.session.rollback()
|
| 424 |
+
print(f"Error clearing chunks: {e}")
|
| 425 |
+
|
| 426 |
+
def scan_and_add_chunks(self, chunk_size=260):
|
| 427 |
+
"""Scan all documents and regenerate chunks"""
|
| 428 |
+
try:
|
| 429 |
+
documents = self.session.query(Document).all()
|
| 430 |
+
print(f"Found {len(documents)} documents, generating chunks...")
|
| 431 |
+
|
| 432 |
+
count = 0
|
| 433 |
+
for doc in documents:
|
| 434 |
+
if not doc.content:
|
| 435 |
+
continue
|
| 436 |
+
|
| 437 |
+
chunks = build_document_chunks(title=doc.title, content=doc.content, chunk_size=chunk_size)
|
| 438 |
+
for text in chunks:
|
| 439 |
+
chunk = Chunk(
|
| 440 |
+
document_id=doc.id,
|
| 441 |
+
text=text
|
| 442 |
+
)
|
| 443 |
+
self.session.add(chunk)
|
| 444 |
+
count += 1
|
| 445 |
+
|
| 446 |
+
self.session.commit()
|
| 447 |
+
print(f"Generated {count} chunks for {len(documents)} documents")
|
| 448 |
+
except Exception as e:
|
| 449 |
+
self.session.rollback()
|
| 450 |
+
print(f"Error generating chunks: {e}")
|
| 451 |
+
|
| 452 |
+
def delete_document(self, document_id: int):
|
| 453 |
+
"""Delete document and its related chunks and embeddings"""
|
| 454 |
+
doc = self.session.query(Document).filter(Document.id == document_id).first()
|
| 455 |
+
if not doc:
|
| 456 |
+
print(f"Document {document_id} does not exist")
|
| 457 |
+
return False
|
| 458 |
+
|
| 459 |
+
chunks = self.session.query(Chunk).filter(Chunk.document_id == document_id).all()
|
| 460 |
+
for chunk in chunks:
|
| 461 |
+
self.session.query(Embedding).filter(Embedding.chunk_id == chunk.id).delete()
|
| 462 |
+
|
| 463 |
+
self.session.query(Chunk).filter(Chunk.document_id == document_id).delete()
|
| 464 |
+
self.session.delete(doc)
|
| 465 |
+
self.session.commit()
|
| 466 |
+
return True
|
| 467 |
+
|
| 468 |
+
def delete_embedding(self):
|
| 469 |
+
"""Delete all embeddings"""
|
| 470 |
+
embeddings = self.session.query(Embedding).all()
|
| 471 |
+
for embedding in embeddings:
|
| 472 |
+
self.session.delete(embedding)
|
| 473 |
+
self.session.query(Chunk).update({Chunk.embedding: None}, synchronize_session=False)
|
| 474 |
+
self.session.commit()
|
| 475 |
+
return True
|
| 476 |
+
|
| 477 |
+
def sync_chunk_embedding_column_from_embeddings(self, model_name="default"):
|
| 478 |
+
"""
|
| 479 |
+
Temporary helper: mirror the latest vector in `embeddings` back into `chunks.embedding`.
|
| 480 |
+
Because `chunks.embedding` is a single column, it only stores one snapshot per chunk.
|
| 481 |
+
"""
|
| 482 |
+
print(f"Syncing chunk.embedding from embeddings (model={model_name})...")
|
| 483 |
+
embeddings_map = self._get_chunk_embeddings_map(model_name=model_name)
|
| 484 |
+
if not embeddings_map:
|
| 485 |
+
print("No matching embeddings found to sync.")
|
| 486 |
+
return 0
|
| 487 |
+
|
| 488 |
+
updated_count = 0
|
| 489 |
+
try:
|
| 490 |
+
for chunk, embedding in embeddings_map.values():
|
| 491 |
+
chunk.embedding = embedding.vector
|
| 492 |
+
updated_count += 1
|
| 493 |
+
self.session.commit()
|
| 494 |
+
print(f"Synchronized {updated_count} chunk embeddings.")
|
| 495 |
+
return updated_count
|
| 496 |
+
except Exception:
|
| 497 |
+
self.session.rollback()
|
| 498 |
+
raise
|
| 499 |
+
|
| 500 |
+
def update_embeddings(self, embed_fn, model_name="default", batch_size=10):
    """Update title embeddings for documents and text embeddings for chunks.

    All existing embeddings for `model_name` are wiped first, then titles and
    chunk texts are re-embedded batch by batch. A failed batch is rolled back
    and skipped; the method only raises if *every* batch of a category fails.

    Args:
        embed_fn: callable taking a list of strings and returning one vector
            per input string.
        model_name: label stored on each Embedding row.
        batch_size: texts per embed_fn call, clamped to 1..10.

    Raises:
        ValueError: if embed_fn is None.
        RuntimeError: if no title batch (or no chunk batch) succeeded at all.
    """
    if embed_fn is None:
        raise ValueError("embed_fn must not be None")
    # Clamp to 1..10 — presumably the upstream embedding API's batch limit;
    # TODO confirm against the provider's documentation.
    batch_size = max(1, min(int(batch_size), 10))

    print("Starting to update title and chunk embeddings...")
    documents = self.session.query(Document).all()
    chunks = self.session.query(Chunk).all()
    successful_title_batches = 0
    successful_chunk_batches = 0

    if not documents and not chunks:
        print("No documents or chunks found to update.")
        return

    # Full reset for this model: drop stored vectors and clear the cached
    # columns before regenerating everything.
    self.session.query(Embedding).filter(Embedding.model_name == model_name).delete()
    self.session.query(Document).update({Document.title_embedding: None}, synchronize_session=False)
    self.session.query(Chunk).update({Chunk.embedding: None}, synchronize_session=False)
    self.session.commit()

    if documents:
        for i in range(0, len(documents), batch_size):
            batch_documents = documents[i:i + batch_size]
            # Guard against NULL titles; the API rejects None inputs.
            titles_to_embed = [str(doc.title or "").strip() for doc in batch_documents]
            print(f"Processing title batch {i // batch_size + 1}: Sending {len(titles_to_embed)} titles to the embedding model...")
            try:
                title_vectors = embed_fn(titles_to_embed)
                if len(title_vectors) != len(batch_documents):
                    raise ValueError(
                        f"Mismatch in count for title batch: Received {len(title_vectors)} embeddings for {len(batch_documents)} documents."
                    )
                for document, vector in zip(batch_documents, title_vectors):
                    document.title_embedding = self._vector_to_bytes(vector)
                # Commit per batch so one bad batch cannot lose earlier work.
                self.session.commit()
                successful_title_batches += 1
            except Exception as e:
                print(f"An error occurred during title batch {i // batch_size + 1}: {e}")
                self.session.rollback()
                continue

    if not chunks:
        print("No chunks found to update after title embedding generation.")
        return

    for i in range(0, len(chunks), batch_size):
        batch_chunks = chunks[i:i + batch_size]
        texts_to_embed = [chunk.text for chunk in batch_chunks]

        print(f"Processing chunk batch {i // batch_size + 1}: Sending {len(texts_to_embed)} text chunks to the embedding model...")
        try:
            embedding_vectors = embed_fn(texts_to_embed)

            if len(embedding_vectors) != len(batch_chunks):
                raise ValueError(
                    f"Mismatch in count for batch: Received {len(embedding_vectors)} embeddings for {len(batch_chunks)} chunks."
                )

            for chunk, vector in zip(batch_chunks, embedding_vectors):
                vector_blob = self._vector_to_bytes(vector)
                # Write the vector both to the chunk's cache column and to a
                # dedicated Embedding row tagged with the model name.
                chunk.embedding = vector_blob
                embedding = Embedding(
                    chunk=chunk,
                    vector=vector_blob,
                    model_name=model_name,
                    created_at=time.time()
                )
                self.session.add(embedding)
            self.session.commit()
            successful_chunk_batches += 1

        except Exception as e:
            print(f"An error occurred during chunk batch {i // batch_size + 1}: {e}")
            self.session.rollback()
            continue

    # Total failure of either phase is fatal; partial failure is tolerated.
    if documents and successful_title_batches == 0:
        raise RuntimeError("Failed to generate any document title embeddings.")
    if chunks and successful_chunk_batches == 0:
        raise RuntimeError("Failed to generate any chunk embeddings.")

    print("All embeddings updated successfully.")
|
| 582 |
+
|
| 583 |
+
def clear_database(self):
    """Clear all data in database tables without deleting table structure.

    Reflects the live schema and deletes rows child-first so foreign-key
    constraints are never violated.
    """
    meta = MetaData()
    meta.reflect(bind=self.engine)
    # reversed(sorted_tables) yields dependents before their parents.
    with self.session.begin():
        for table in reversed(meta.sorted_tables):
            self.session.execute(table.delete())
    # No explicit commit: `session.begin()` commits on successful exit.
    # The original called session.commit() inside the `with` block, which
    # double-ends the transaction and can raise under SQLAlchemy 1.4+.
    print("Database cleared")
|
| 592 |
+
|
| 593 |
+
def search(self, query, embed_fn, model_name="default", top_k=3):
    """Search for similar content.

    Embeds the query, scores every stored chunk vector with cosine
    similarity, and returns the best (chunk, score) pairs above 0.5.
    """
    query_vec = self._get_query_vector(query, embed_fn)
    chunk_map = self._get_chunk_embeddings_map(model_name=model_name)

    scored = [
        (chunk, self._cosine_similarity(self._vector_from_bytes(emb.vector), query_vec))
        for chunk, emb in chunk_map.values()
    ]
    # Keep only reasonably similar chunks, best matches first.
    hits = [pair for pair in scored if pair[1] > 0.5]
    hits.sort(key=lambda pair: pair[1], reverse=True)
    return hits[:top_k]
|
| 605 |
+
|
| 606 |
+
def search_document_titles(self, query, embed_fn, title_top_k=3):
    """Search similar document titles using cached title embeddings.

    Returns up to `title_top_k` (Document, score) pairs, highest score
    first; documents without a cached title embedding are skipped.
    """
    query_vec = self._get_query_vector(query, embed_fn)
    candidates = (
        self.session.query(Document)
        .filter(Document.title_embedding.isnot(None))
        .all()
    )
    scored = [
        (doc, self._cosine_similarity(self._vector_from_bytes(doc.title_embedding), query_vec))
        for doc in candidates
    ]
    return sorted(scored, key=lambda pair: pair[1], reverse=True)[:title_top_k]
|
| 618 |
+
|
| 619 |
+
def search_chunks_within_documents(self, query, document_ids, embed_fn, model_name="default", chunk_top_k=5):
    """Search chunks only within the given document ids.

    Scores every chunk belonging to `document_ids` against the embedded
    query and returns the top `chunk_top_k` (chunk, score) pairs.
    """
    query_vec = self._get_query_vector(query, embed_fn)
    scoped_map = self._get_chunk_embeddings_map(model_name=model_name, document_ids=document_ids)

    scored = [
        (chunk, self._cosine_similarity(self._vector_from_bytes(emb.vector), query_vec))
        for chunk, emb in scoped_map.values()
    ]
    return sorted(scored, key=lambda pair: pair[1], reverse=True)[:chunk_top_k]
|
| 630 |
+
|
| 631 |
+
def search_knowledge_two_stage(self, query, embed_fn, model_name="default", title_top_k=5, chunk_top_k=8):
    """Retrieve top titles first, then search chunks inside those
    title-selected documents.

    Falls back to a flat chunk search when title matching finds no
    documents, or when the scoped chunk search comes back empty.
    """
    matched_titles = self.search_document_titles(query, embed_fn=embed_fn, title_top_k=title_top_k)
    doc_ids = [doc.id for doc, _ in matched_titles]

    if doc_ids:
        scoped_hits = self.search_chunks_within_documents(
            query=query,
            document_ids=doc_ids,
            embed_fn=embed_fn,
            model_name=model_name,
            chunk_top_k=chunk_top_k,
        )
        if scoped_hits:
            return scoped_hits

    # Stage-1 miss (or empty stage-2): degrade gracefully to a flat search.
    return self.search(query, embed_fn=embed_fn, model_name=model_name, top_k=chunk_top_k)
|
| 649 |
+
|
| 650 |
+
def from_video_search(self, query_vec, model_name="default", top_k=3):
    """Search for similar content from a video embedding vector.

    Args:
        query_vec: numpy array embedding produced from a video.
        model_name: embedding model whose stored vectors are compared.
        top_k: maximum number of chunk texts to return.

    Returns:
        list[str]: chunk texts ordered by descending cosine similarity
        (only matches scoring above 0.1 are kept).
    """
    query_vec = np.asarray(query_vec).flatten()
    query_norm = np.linalg.norm(query_vec)
    if query_norm == 0:
        # A zero vector has no direction; cosine similarity is undefined.
        return []

    chunks = self.session.query(Chunk).join(Embedding).filter(
        Embedding.model_name == model_name
    ).all()

    scored = []
    for chunk in chunks:
        embedding = self.session.query(Embedding).filter(
            Embedding.chunk_id == chunk.id,
            Embedding.model_name == model_name
        ).order_by(Embedding.created_at.desc()).first()

        if not embedding:
            continue
        vec = pickle.loads(embedding.vector).flatten()
        vec_norm = np.linalg.norm(vec)
        if vec_norm == 0:
            continue
        score = float(np.dot(vec, query_vec) / (vec_norm * query_norm))
        if score > 0.1:
            scored.append((chunk.text, score))

    # Bug fix: the original appended bare strings and then sorted with
    # key=lambda x: x[1], i.e. by the *second character of the text*
    # (crashing on 1-char texts). Keep (text, score) pairs and sort by score.
    scored.sort(key=lambda item: item[1], reverse=True)
    return [text for text, _ in scored[:top_k]]
|
| 675 |
+
|
| 676 |
+
def embedding_model_choose(self, model):
    """Return the embedding function registered for `model`.

    Args:
        model: one of "ali-text-embedding-v3", "languagebind_video",
            "languagebind_text".

    Returns:
        The embedding callable for the given model name.

    Raises:
        ValueError: for an unrecognized model name. The original silently
            fell through and returned None, which surfaced later as an
            opaque "'NoneType' is not callable" error.
    """
    registry = {
        "ali-text-embedding-v3": get_embedding,
        "languagebind_video": get_embedding_languagebind_video,
        "languagebind_text": get_embedding_languagebind_text,
    }
    try:
        return registry[model]
    except KeyError:
        raise ValueError(f"Unknown embedding model: {model!r}") from None
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
def chat(self, user_question: str, top_k=3, embedding_model="ali-text-embedding-v3", chat_model="deepseek"):
    """
    Handle chat logic: search context, call LLM, and print response.

    Retrieval or LLM failures are reported to stdout and abort the turn.
    """
    print(f"Received question: {user_question}")
    print("Retrieving relevant information...")

    try:
        hits = self.search(
            user_question,
            embed_fn=self.embedding_model_choose(embedding_model),
            model_name=embedding_model,
            top_k=top_k,
        )
    except Exception as e:
        print(f"Error during information retrieval: {e}")
        return

    if not hits:
        print("No relevant information found.")
        return

    # Join retrieved chunk texts into one context blob for the LLM prompt.
    context = "\n\n---\n\n".join(chunk.text for chunk, _ in hits)

    print("Context found, generating response...")

    try:
        answer = self.chat_model_choose(chat_model)(prompt=user_question, context=context)
    except Exception as e:
        print(f"Error calling LLM: {e}")
        return

    print("\nModel response:")
    print(answer)
|
| 721 |
+
|
| 722 |
+
# if __name__ == "__main__":
|
| 723 |
+
# rag_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
| 724 |
+
# zhishi_path = os.path.join(rag_root, "zhishi.txt")
|
| 725 |
+
# stats = build_pure_knowledge_db(
|
| 726 |
+
# zhishi_path=zhishi_path,
|
| 727 |
+
# db_path=db_pure_path,
|
| 728 |
+
# chunk_size=200,
|
| 729 |
+
# rebuild=True,
|
| 730 |
+
# )
|
| 731 |
+
|
| 732 |
+
# print("Pure knowledge DB rebuild complete.")
|
| 733 |
+
# print(f"Source: {stats['zhishi_path']}")
|
| 734 |
+
# print(f"Target DB: {stats['db_path']}")
|
| 735 |
+
# print(f"Sections: {stats['sections']}")
|
| 736 |
+
# print(f"Documents: {stats['documents']}")
|
| 737 |
+
# print(f"Chunks: {stats['chunks']}")
|
| 738 |
+
# print(f"Embeddings: {stats['embeddings']}")
|
| 739 |
+
# if __name__ == "__main__":
|
| 740 |
+
# from RAG.Knowledge_Database.AIdbconfig import session_pure
|
| 741 |
+
|
| 742 |
+
# db = KnowledgeDB(session_pure)
|
| 743 |
+
# # Temporary one-off backfill for existing rows, if needed:
|
| 744 |
+
# # db.sync_chunk_embedding_column_from_embeddings(model_name="ali-text-embedding-v4")
|
| 745 |
+
# db.update_embeddings(
|
| 746 |
+
# embed_fn=get_embedding,
|
| 747 |
+
# model_name="ali-text-embedding-v4",
|
| 748 |
+
# batch_size=10,
|
| 749 |
+
# )
|
| 750 |
+
|
RAG/Knowledge_Database/AIdbconfig.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
@filename: dbconfig.py
|
| 3 |
+
@description: AI database file interface for loading AI database
|
| 4 |
+
"""
|
| 5 |
+
import os
|
| 6 |
+
|
| 7 |
+
from sqlalchemy import create_engine
|
| 8 |
+
from sqlalchemy.orm import sessionmaker
|
| 9 |
+
|
| 10 |
+
# Get absolute path of current script file (AIdbconfig.py)
|
| 11 |
+
script_path = os.path.abspath(__file__)
|
| 12 |
+
# Get directory containing the script
|
| 13 |
+
script_dir = os.path.dirname(script_path)
|
| 14 |
+
# Get project root directory (parent of script directory)
|
| 15 |
+
project_root = os.path.dirname(script_dir)
|
| 16 |
+
# Construct absolute path to database file
|
| 17 |
+
db_path = os.path.join(project_root, "db_files", "LLM_Knowledge_New_ali_zh.db")
|
| 18 |
+
db_en_path = os.path.join(project_root, "db_files", "LLM_Knowledge_New_En.db")
|
| 19 |
+
db_pure_path = os.path.join(project_root, "db_files", "LLM_Knowledge_Pure.db")
|
| 20 |
+
|
| 21 |
+
DATABASE_URL = 'sqlite:///' + db_path
|
| 22 |
+
DATABASE_EN_URL = 'sqlite:///' + db_en_path
|
| 23 |
+
DATABASE_PURE_URL = 'sqlite:///' + db_pure_path
|
| 24 |
+
|
| 25 |
+
engine = create_engine(DATABASE_URL, echo=False)
|
| 26 |
+
engine_en = create_engine(DATABASE_EN_URL, echo=False)
|
| 27 |
+
engine_pure = create_engine(DATABASE_PURE_URL, echo=False)
|
| 28 |
+
|
| 29 |
+
# Create Session class
|
| 30 |
+
session = sessionmaker(bind=engine)()
|
| 31 |
+
session_en = sessionmaker(bind=engine_en)()
|
| 32 |
+
session_pure = sessionmaker(bind=engine_pure)()
|
RAG/Knowledge_Database/RAGFunc.py
ADDED
|
@@ -0,0 +1,1511 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
@filename: RAG.py
|
| 3 |
+
@description: Implementation of all RAG functions
|
| 4 |
+
"""
|
| 5 |
+
import os
|
| 6 |
+
import sys
|
| 7 |
+
import re
|
| 8 |
+
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
| 9 |
+
import numpy as np
|
| 10 |
+
from openai import OpenAI
|
| 11 |
+
import requests
|
| 12 |
+
import json
|
| 13 |
+
from RAG.Knowledge_Database.languagebind_main.languagebind import LanguageBindImageTokenizer,LanguageBind, to_device, LanguageBindVideo, LanguageBindVideoTokenizer, LanguageBindVideoProcessor, transform_dict
|
| 14 |
+
import torch
|
| 15 |
+
import json
|
| 16 |
+
from typing import List, Union, Dict, Any, Literal
|
| 17 |
+
import base64
|
| 18 |
+
from RTMPose.Bone_Feature_Extract import *
|
| 19 |
+
|
| 20 |
+
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
| 21 |
+
|
| 22 |
+
def split_and_merge(text, chunk_size=500):
    """
    Split text using LangChain, then merge into chunks close to chunk_size
    - Does not cross paragraphs
    - Does not break sentences
    - Punctuation at end goes to previous chunk
    - Remove leading punctuation from chunks
    """
    # NOTE(review): the separator and punctuation lists contain repeated
    # entries — presumably fullwidth (CJK) / ASCII punctuation pairs that
    # were normalized somewhere; confirm against the original encoding.
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=0,
        separators=["\n\n", "\n", ".", ".", "!", "!", "?", "?", ";", ";"]
    )
    pieces = splitter.split_text(text)

    chunks = []           # finished chunks to return
    current_chunk = ""    # chunk currently being assembled
    current_len = 0       # running length of current_chunk
    punctuations = {".", ".", "!", "!", "?", "?", ";", ";"}

    for piece in pieces:
        piece = piece.strip()
        if not piece:
            continue

        # A paragraph marker forces the current chunk to close: merged
        # chunks never span across a "\n\n" boundary.
        if "\n\n" in piece:
            if current_chunk:
                chunks.append(current_chunk.strip())
            current_chunk = ""
            current_len = 0
            piece = piece.replace("\n\n", "").strip()
            if not piece:
                continue

        # A piece that is pure punctuation attaches to the previous chunk
        # so sentences keep their terminal punctuation.
        if piece in punctuations:
            current_chunk += piece
            current_len += len(piece)
            continue

        piece_len = len(piece)
        if current_len + piece_len <= chunk_size:
            current_chunk += piece
            current_len += piece_len
        else:
            # Current chunk is full: strip any punctuation it starts with,
            # flush it, and begin a new chunk from this piece.
            if current_chunk:
                while current_chunk and current_chunk[0] in punctuations:
                    current_chunk = current_chunk[1:].lstrip()
                if current_chunk:
                    chunks.append(current_chunk.strip())
            current_chunk = piece
            current_len = piece_len

    # Flush the trailing chunk, again removing leading punctuation.
    if current_chunk:
        while current_chunk and current_chunk[0] in punctuations:
            current_chunk = current_chunk[1:].lstrip()
        if current_chunk:
            chunks.append(current_chunk.strip())

    return chunks
|
| 81 |
+
|
| 82 |
+
def get_embedding(texts):
    """Embed a batch of texts via the DashScope OpenAI-compatible API.

    Args:
        texts: a string or list of strings to embed.

    Returns:
        list[np.ndarray]: one 1024-dim float32 vector per input text.
    """
    client = OpenAI(
        api_key=os.getenv("ALI_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
    )
    response = client.embeddings.create(
        model="text-embedding-v4",
        input=texts,
        dimensions=1024,
        encoding_format="float"
    )
    # The API returns vectors in input order; convert each to float32.
    return [np.array(item.embedding, dtype=np.float32) for item in response.data]
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def video_to_base64(video_path):
    """Read a video file and return its contents as a base64-encoded string."""
    with open(video_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")
|
| 102 |
+
|
| 103 |
+
def _media_type_from_ext(path: str) -> str:
|
| 104 |
+
ext = os.path.splitext(str(path))[1].lower()
|
| 105 |
+
if ext in (".jpg", ".jpeg"):
|
| 106 |
+
return "image/jpeg"
|
| 107 |
+
if ext == ".png":
|
| 108 |
+
return "image/png"
|
| 109 |
+
return "image/jpeg"
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def _load_openai_images_from_folder(folder: str) -> list[dict]:
    """Load Pipeline-4 template keyframes from `folder` as OpenAI image parts.

    Only regular files named as bare integers with a .jpg extension
    (1.jpg, 2.jpg, ...) are accepted; they are returned in ascending
    numeric order as
    `{"type": "input_image", "image_url": "data:<mime>;base64,<data>"}`.

    Args:
        folder: directory produced by extract_reference_keyframes.py.

    Returns:
        list[dict]: Responses-API image parts in frame order.

    Raises:
        ValueError: if the folder is missing, a keyframe cannot be read,
            or no numeric .jpg keyframes are found.
    """
    folder_path = os.path.abspath(str(folder or "").strip())
    if not folder_path or not os.path.isdir(folder_path):
        raise ValueError(
            f"Template keyframes directory not found: {folder_path}. "
            "Please run extract_reference_keyframes.py first."
        )

    # (Removed a dead `ordered_paths = []` initializer that was immediately
    # overwritten below.)
    numeric_files = []
    for name in os.listdir(folder_path):
        full_path = os.path.join(folder_path, name)
        stem, ext = os.path.splitext(name)
        if not os.path.isfile(full_path):
            continue
        # Pipeline 4 template keyframes: only *.jpg named as integers (1.jpg, 2.jpg, ...).
        if ext.lower() != ".jpg":
            continue
        stem_stripped = str(stem).strip()
        if not stem_stripped.isdigit():
            continue
        numeric_files.append((int(stem_stripped), os.path.abspath(full_path)))

    # Sort by the numeric stem so frames come out in filename order.
    numeric_files.sort(key=lambda x: x[0])
    ordered_paths = [p for _, p in numeric_files]

    if not ordered_paths:
        raise ValueError(
            f"No numeric .jpg template keyframes found in {folder_path}. "
            "Expected files like 1.jpg, 2.jpg, 3.jpg."
        )

    openai_images = []
    for img_path in ordered_paths:
        try:
            with open(img_path, "rb") as f:
                b64 = base64.b64encode(f.read()).decode("utf-8")
        except Exception as e:
            raise ValueError(f"Failed to read template keyframe image: {img_path}. {e}") from e

        media_type = _media_type_from_ext(img_path)
        openai_images.append(
            {
                "type": "input_image",
                "image_url": f"data:{media_type};base64,{b64}",
            }
        )

    return openai_images
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def get_video_ori_keywords(
|
| 164 |
+
video_path,
|
| 165 |
+
pipeline=1,
|
| 166 |
+
target_k=5,
|
| 167 |
+
model_name='qwen3-vl-plus',
|
| 168 |
+
language='zh',
|
| 169 |
+
show=False,
|
| 170 |
+
template_keyframes_dir = None,
|
| 171 |
+
) -> dict:
|
| 172 |
+
"""
|
| 173 |
+
Multi-stage multimodal assessment for archery posture.
|
| 174 |
+
|
| 175 |
+
Pipeline semantics:
|
| 176 |
+
1) Video only: user content contains ONLY video_url (no extra text).
|
| 177 |
+
2) Keyframes only: extracted keyframes are sent as input_image parts (Responses API).
|
| 178 |
+
3) Keyframes + biomechanics metrics: keyframes + metrics text (Responses API).
|
| 179 |
+
4) Keyframes + biomechanics metrics + template keyframes loaded from folder.
|
| 180 |
+
|
| 181 |
+
Return:
|
| 182 |
+
A normalized assessment dict with keys:
|
| 183 |
+
total_score, head_score, hand_score, torso_score, foot_score, arm_score, comment
|
| 184 |
+
where `comment` is a keyword list split by comma.
|
| 185 |
+
"""
|
| 186 |
+
def _normalize_language(lang_in: str) -> str:
|
| 187 |
+
lang_norm = str(lang_in or "en").strip().lower()
|
| 188 |
+
if lang_norm in ("zh", "zh-cn", "zh_hans", "cn", "chinese", "中文"):
|
| 189 |
+
return "zh"
|
| 190 |
+
if lang_norm in ("en", "en-us", "english"):
|
| 191 |
+
return "en"
|
| 192 |
+
raise ValueError("language must be 'en' or 'zh'")
|
| 193 |
+
|
| 194 |
+
def _chat_completion_output_text(resp) -> str:
|
| 195 |
+
if resp is None or not getattr(resp, "choices", None):
|
| 196 |
+
raise ValueError("Chat completion returned empty response.")
|
| 197 |
+
|
| 198 |
+
message = getattr(resp.choices[0], "message", None)
|
| 199 |
+
content = getattr(message, "content", None)
|
| 200 |
+
if isinstance(content, str) and content.strip():
|
| 201 |
+
return content.strip()
|
| 202 |
+
|
| 203 |
+
if isinstance(content, list):
|
| 204 |
+
parts = []
|
| 205 |
+
for item in content:
|
| 206 |
+
if isinstance(item, dict):
|
| 207 |
+
if item.get("type") == "text" and item.get("text"):
|
| 208 |
+
parts.append(str(item["text"]))
|
| 209 |
+
else:
|
| 210 |
+
text = getattr(item, "text", None)
|
| 211 |
+
if text:
|
| 212 |
+
parts.append(str(text))
|
| 213 |
+
if parts:
|
| 214 |
+
return "".join(parts).strip()
|
| 215 |
+
|
| 216 |
+
raise ValueError("Chat completion contains no textual content.")
|
| 217 |
+
|
| 218 |
+
def _to_chat_content(input_content: list[dict]) -> list[dict]:
|
| 219 |
+
chat_content = []
|
| 220 |
+
for item in input_content:
|
| 221 |
+
if not isinstance(item, dict):
|
| 222 |
+
continue
|
| 223 |
+
item_type = item.get("type")
|
| 224 |
+
if item_type == "input_text":
|
| 225 |
+
chat_content.append({"type": "text", "text": str(item.get("text", ""))})
|
| 226 |
+
elif item_type == "input_image":
|
| 227 |
+
image_url = item.get("image_url")
|
| 228 |
+
if isinstance(image_url, dict):
|
| 229 |
+
image_url = image_url.get("url")
|
| 230 |
+
if image_url:
|
| 231 |
+
chat_content.append({"type": "image_url", "image_url": {"url": str(image_url)}})
|
| 232 |
+
if not chat_content:
|
| 233 |
+
raise ValueError("No valid chat content converted from keyframe inputs.")
|
| 234 |
+
return chat_content
|
| 235 |
+
|
| 236 |
+
def _extract_json_object_text(raw_text: str) -> str:
|
| 237 |
+
text = str(raw_text or "").strip()
|
| 238 |
+
if not text:
|
| 239 |
+
raise ValueError("Model response is empty; expected a JSON object string.")
|
| 240 |
+
|
| 241 |
+
decoder = json.JSONDecoder()
|
| 242 |
+
for idx, ch in enumerate(text):
|
| 243 |
+
if ch != "{":
|
| 244 |
+
continue
|
| 245 |
+
try:
|
| 246 |
+
parsed, end = decoder.raw_decode(text[idx:])
|
| 247 |
+
except json.JSONDecodeError:
|
| 248 |
+
continue
|
| 249 |
+
if isinstance(parsed, dict):
|
| 250 |
+
return text[idx: idx + end]
|
| 251 |
+
raise ValueError("No valid JSON object found in model response.")
|
| 252 |
+
|
| 253 |
+
def _normalize_comment_keywords(raw_comment) -> list[str]:
|
| 254 |
+
if isinstance(raw_comment, list):
|
| 255 |
+
keywords = [str(item).strip() for item in raw_comment]
|
| 256 |
+
return [k for k in keywords if k]
|
| 257 |
+
|
| 258 |
+
text = str(raw_comment or "").replace(",", ",")
|
| 259 |
+
parts = [p.strip() for p in text.split(",")]
|
| 260 |
+
return [p for p in parts if p]
|
| 261 |
+
|
| 262 |
+
def _parse_assessment_payload(raw_text: str) -> dict:
    """Parse and validate the model's assessment JSON.

    Extracts the first JSON object from *raw_text*, checks that every
    required field is present, coerces each score to ``int``, and
    normalizes 'comment' into a keyword list.

    Raises:
        ValueError: on unparsable JSON, non-object JSON, missing keys, or
            non-integer score values.
    """
    score_keys = (
        "total_score",
        "head_score",
        "hand_score",
        "torso_score",
        "foot_score",
        "arm_score",
    )
    json_text = _extract_json_object_text(raw_text)
    try:
        payload = json.loads(json_text)
    except Exception as e:
        raise ValueError(f"Failed to parse model JSON response: {e}") from e

    if not isinstance(payload, dict):
        raise ValueError("Model response JSON is not an object.")

    missing = [k for k in score_keys + ("comment",) if k not in payload]
    if missing:
        raise ValueError(f"Model response JSON missing required keys: {missing}")

    result = {}
    for key in score_keys:
        try:
            result[key] = int(payload[key])
        except Exception as e:
            raise ValueError(f"Invalid score field '{key}': {payload.get(key)}") from e
    result["comment"] = _normalize_comment_keywords(payload.get("comment"))
    return result
|
| 293 |
+
|
| 294 |
+
lang = _normalize_language(language)
|
| 295 |
+
pipeline = int(pipeline)
|
| 296 |
+
if pipeline not in (1, 2, 3, 4):
|
| 297 |
+
raise ValueError(
|
| 298 |
+
"pipeline must be 1 (video), 2 (keyframes), 3 (keyframes+metrics), "
|
| 299 |
+
"or 4 (keyframes+metrics+template keyframes)"
|
| 300 |
+
)
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
# client = OpenAI(
|
| 304 |
+
# base_url="https://openrouter.ai/api/v1",
|
| 305 |
+
# api_key=os.environ.get("OPENROUTER_API_KEY"),
|
| 306 |
+
# )
|
| 307 |
+
client = OpenAI(
|
| 308 |
+
api_key=os.getenv("ALI_API_KEY"),
|
| 309 |
+
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
|
| 310 |
+
)
|
| 311 |
+
|
| 312 |
+
system_base_map = {
|
| 313 |
+
"en": (
|
| 314 |
+
"You are a professional archery coach.\n"
|
| 315 |
+
"Use ONLY the user-provided inputs (video/keyframes and optional biomechanics metrics).\n"
|
| 316 |
+
"You must NOT search for information or draw upon your own prior knowledge.\n"
|
| 317 |
+
"Output MUST be ONLY a JSON object string with keys: "
|
| 318 |
+
"'total_score', 'head_score', 'hand_score', 'torso_score', 'foot_score', 'arm_score', 'comment'.\n"
|
| 319 |
+
"All scores MUST be integers: each part score in [0,5], total_score in [0,25].\n"
|
| 320 |
+
"total_score MUST equal head_score + hand_score + torso_score + foot_score + arm_score.\n"
|
| 321 |
+
"In 'comment', output a concise keyword sequence.\n"
|
| 322 |
+
"Use short phrases separated by commas.\n"
|
| 323 |
+
"Do not add extra keys, markdown, code fences, or any other text."
|
| 324 |
+
),
|
| 325 |
+
"zh": (
|
| 326 |
+
"你是一名专业的射箭教练.\n"
|
| 327 |
+
"请参考下文完成指定的任务,下面先提出对输入及输出的要求.\n"
|
| 328 |
+
"只能使用用户提供的输入(视频/关键帧以及可选的人体生物力学指标),不得搜索或引用已知知识,也不能输出不基于实时评判的结果\n"
|
| 329 |
+
"输出必须且只能是一个 JSON 对象字符串,键必须为:"
|
| 330 |
+
"'total_score', 'head_score', 'hand_score', 'torso_score', 'foot_score', 'arm_score', 'comment'.\n"
|
| 331 |
+
"所有分数必须为整数:各部位分数范围 [0,5],总分 total_score 范围 [0,25].\n"
|
| 332 |
+
"comment字段输出简洁的关键句序列\n"
|
| 333 |
+
"用短语,使用&号分隔各序列中的关键句(关键句1&关键句2......这样的形式).\n"
|
| 334 |
+
"不要添加额外键、Markdown、代码块或任何其他文本"
|
| 335 |
+
),
|
| 336 |
+
}
|
| 337 |
+
|
| 338 |
+
system_task_video_map = {
|
| 339 |
+
"en": (
|
| 340 |
+
"\nTask:\n"
|
| 341 |
+
"You will receive a full archery video. Observe and judge whether the archer achieves the following.\n"
|
| 342 |
+
"Static / aiming-phase observations (mainly from earlier parts):\n"
|
| 343 |
+
"- Body upright/vertical axis stable\n"
|
| 344 |
+
"- Head rotates sufficiently toward target\n"
|
| 345 |
+
"- Arms form a straight line / alignment is correct\n"
|
| 346 |
+
"- Rear hand firmly anchored against the chin\n"
|
| 347 |
+
"- Nose touches the bowstring (string touches chin/nose)\n"
|
| 348 |
+
"- Bow hand fingers are relaxed (not clenched)\n"
|
| 349 |
+
"- Before release, rear hand does NOT slip/loosen the string during aiming\n"
|
| 350 |
+
"Release / follow-through observations (around release and after):\n"
|
| 351 |
+
"- At release, rear hand extends backward (follow-through)\n"
|
| 352 |
+
"- At release, front/bow hand relaxes and pushes/lets the bow move naturally (not grabbing)\n"
|
| 353 |
+
"Then output the required JSON with preliminary scores and a keyword-style comment.\n"
|
| 354 |
+
"Keywords MUST be concise, and MUST be selected from the following keyword library (use whichever items apply).\n"
|
| 355 |
+
"Keyword library: separate keywords with \",\".\n"
|
| 356 |
+
"1. Torso: (1) Correct: Body vertical axis is perpendicular to the ground; body is side-on to the shooting direction; "
|
| 357 |
+
"(2) Incorrect: Body center of mass is tilted/leaning; body is not rotated to a side-on stance.\n"
|
| 358 |
+
"2. Arms: (1) Correct: Front hand, front shoulder, and rear shoulder are aligned on one line; front forearm is internally rotated; "
|
| 359 |
+
"rear forearm is aligned with the arrow; "
|
| 360 |
+
"(2) Incorrect: Front shoulder has an angle and is not opened; forearm is turned outward; forearm is not aligned with the arrow.\n"
|
| 361 |
+
"3. Head: (1) Correct: Head rotates horizontally; (2) Incorrect: Head does not rotate enough.\n"
|
| 362 |
+
"4. Feet: (1) Correct: Feet stance is shoulder-width; (2) Incorrect: Feet are placed arbitrarily.\n"
|
| 363 |
+
"5. Hands: (1) Correct: Bow-hand fingers are relaxed; rear hand is anchored at the cheek/jaw area; "
|
| 364 |
+
"the bowstring touches the chin and nose; during aiming the rear hand does not slip/loosen the string; "
|
| 365 |
+
"at release the rear hand extends backward; at release the front/bow hand relaxes and pushes the bow; "
|
| 366 |
+
"(2) Incorrect: Fingers are stiff or clenched; rear hand is floating and not anchored; string does not touch chin and nose; "
|
| 367 |
+
"before/after release the rear hand slowly slides backward; at release the rear hand stays in place or moves outward/forward; "
|
| 368 |
+
"at release the front/bow hand grabs the bow.\n"
|
| 369 |
+
),
|
| 370 |
+
"zh": (
|
| 371 |
+
"\n任务:\n"
|
| 372 |
+
"你将看到一个射箭短视频\n"
|
| 373 |
+
"从视频中观察并判断下面的动作,如果符合则comment中严格加入(一字不差)对应标准后第一个括号内关键句库中的正确动作(R),反之加上对应的错误动作(W),并按照评分规则(S)对分数进行初评,再按下文中的信息修正你的评价(如果出现)\n"
|
| 374 |
+
"1.身体是否直立(R:身体纵轴垂直于地面/W:身体重心歪斜)(S:可认为基本垂直的情况躯干部位给4/5分,出现歪斜给2/3分)\n"
|
| 375 |
+
"2.身体是否转到侧对(S:一般给出R:身体侧向对着射箭方向 的评价,除非极其离谱,给出W:身体没有转到侧对)\n"
|
| 376 |
+
"3.头部是否转动到位(R:头部水平转动/W:头部没有转动到位)(S:转动到位给5分,不到位给3分,极其离谱情况给1/2分)\n"
|
| 377 |
+
"4.手臂是否成一条直线(R:前手,前肩,后肩在一条线上,后手小臂与箭在一条线上/W:小臂与箭有角度)(S:后肩部有角度给2/3分,否则4/5分)\n"
|
| 378 |
+
"5.后手是否牢牢靠在下巴上(R:后手靠在脸颊下颌位置,弓弦靠在下巴和鼻子上/W:后手悬空没有贴实,弦没有贴上下巴和鼻子)(S:手部评分规则参看[手部评分规则])\n"
|
| 379 |
+
"6.握弓手的手指是否放松,以在放箭之后,弓箭是否自然向前转动为标准(R:推弓手手指放松/W:手指僵直或握紧)\n"
|
| 380 |
+
"7.后手在未放箭的瞄准阶段是否有松弦现象(一般都给出R:瞄准时后手没有松滑弦 的评价/过于离谱给出W:瞄准时后手松滑弦)\n"
|
| 381 |
+
"8.双脚开步与肩同宽(R:双脚开步与肩同宽/W:双脚随意站立)(S:一般给出满分,出现极其离谱的情况可以给0/1分)\n"
|
| 382 |
+
"9.放箭时后手是否在箭射出时向后延展(随动)(R:撒放时后手顺势向后延展/W:撒放时后手定在原地或向外,向前)\n"
|
| 383 |
+
"10.放箭时前手是否放松推弓(箭射出后握弓手保持放松,弓自然转动)(R:撒放时前手放松推弓/W:撒放时前手握弓)\n"
|
| 384 |
+
"随后按要求输出JSON,给出初步评分与关键词化comment(关键词应精炼,必须使用以下的表达(符合哪项表达就使用哪项表达))\n"
|
| 385 |
+
"评分规则:\n"
|
| 386 |
+
"[手部评分规则]:三条标准:靠下巴(手指必须盖住下巴的一部分,牢牢贴合才算贴合,否则就是没有贴合),撒放向后移动,推弓(撒放后弓必须自然转动)这三条完全满足5分,少一条扣一分"
|
| 387 |
+
)
|
| 388 |
+
}
|
| 389 |
+
|
| 390 |
+
system_task_keyframes_map = {
|
| 391 |
+
"en": (
|
| 392 |
+
"\nTask:\n"
|
| 393 |
+
"You will receive a time-ordered sequence of keyframes extracted from the video (early -> late).\n"
|
| 394 |
+
"Use earlier keyframes to judge static posture/aiming quality:\n"
|
| 395 |
+
"- Body upright/vertical axis stable\n"
|
| 396 |
+
"- Head rotates sufficiently toward target\n"
|
| 397 |
+
"- Arms form a straight line / alignment is correct\n"
|
| 398 |
+
"- Rear hand firmly anchored against the chin\n"
|
| 399 |
+
"- Nose touches the bowstring (string touches chin/nose)\n"
|
| 400 |
+
"- Bow hand fingers are relaxed\n"
|
| 401 |
+
"- Before release, rear hand does NOT slip/loosen the string during aiming\n"
|
| 402 |
+
"Use later keyframes AND their changes to judge release/follow-through quality:\n"
|
| 403 |
+
"- At release, rear hand extends backward (follow-through)\n"
|
| 404 |
+
"- At release, front/bow hand relaxes and pushes/lets the bow move naturally (not grabbing)\n"
|
| 405 |
+
"Important: use the changes across later keyframes to infer release quality, not a single still.\n"
|
| 406 |
+
"Then output the required JSON with preliminary scores and a keyword-style comment.\n"
|
| 407 |
+
"Keywords MUST be concise, and MUST be selected from the following keyword library (use whichever items apply).\n"
|
| 408 |
+
"Keyword library: separate keywords with \",\".\n"
|
| 409 |
+
"1. Torso: (1) Correct: Body vertical axis is perpendicular to the ground; body is side-on to the shooting direction; "
|
| 410 |
+
"(2) Incorrect: Body center of mass is tilted/leaning; body is not rotated to a side-on stance.\n"
|
| 411 |
+
"2. Arms: (1) Correct: Front hand, front shoulder, and rear shoulder are aligned on one line; front forearm is internally rotated; "
|
| 412 |
+
"rear forearm is aligned with the arrow; "
|
| 413 |
+
"(2) Incorrect: Front shoulder has an angle and is not opened; forearm is turned outward; forearm is not aligned with the arrow.\n"
|
| 414 |
+
"3. Head: (1) Correct: Head rotates horizontally; (2) Incorrect: Head does not rotate enough.\n"
|
| 415 |
+
"4. Feet: (1) Correct: Feet stance is shoulder-width; (2) Incorrect: Feet are placed arbitrarily.\n"
|
| 416 |
+
"5. Hands: (1) Correct: Bow-hand fingers are relaxed; rear hand is anchored at the cheek/jaw area; "
|
| 417 |
+
"the bowstring touches the chin and nose; during aiming the rear hand does not slip/loosen the string; "
|
| 418 |
+
"at release the rear hand extends backward; at release the front/bow hand relaxes and pushes the bow; "
|
| 419 |
+
"(2) Incorrect: Fingers are stiff or clenched; rear hand is floating and not anchored; string does not touch chin and nose; "
|
| 420 |
+
"before/after release the rear hand slowly slides backward; at release the rear hand stays in place or moves outward/forward; "
|
| 421 |
+
"at release the front/bow hand grabs the bow.\n"
|
| 422 |
+
),
|
| 423 |
+
"zh": (
|
| 424 |
+
"\n任务:\n"
|
| 425 |
+
"你将看到一组按时间顺序排列的关键帧(从早到晚).\n"
|
| 426 |
+
"从观察中观察并判断下面的动作,如果符合则关键词序列中直接加入对应标准后第一个括号内词库中的正确动作字符串,反之加上对应的错误动作字符串(正确动作在前,错误动作在后),并按照评分规则(每行词库后的括号)对分数进行初评,再按[生物力学特征信息],[模范]中的信息修正你的评价(如果出现)\n"
|
| 427 |
+
"1.身体是否直立(R:身体纵轴垂直于地面/W:身体重心歪斜)(可认为基本垂直的情况躯干部位给4/5分,出现歪斜给2/3分,请在给出的第一帧中判断)\n"
|
| 428 |
+
"2.身体是否转到侧对(一般给出R:身体侧向对着射箭方向 的评价,除非极其离谱,给出W:身体没有转到侧对)\n"
|
| 429 |
+
"3.头部是否转动到位(R:头部水平转动/W:头部没有转动到位)(转动到位给5分,不到位给3分,极其离谱情况给1/2分,请在给出的第一帧中判断)\n"
|
| 430 |
+
"4.手臂是否成一条直线(R:前手,前肩,后肩在一条线上,后手小臂与箭在一条线上/W:小臂与箭有角度)(后肩部有角度给2/3分,否则4/5分,请在给出的第一帧中判断)\n"
|
| 431 |
+
"5.后手是否牢牢靠在下巴上(R:后手靠在脸颊下颌位置,弓弦靠在下巴和鼻子上/W:后手悬空没有贴实,弦没有贴上下巴和鼻子)(手部评分规则参看[手部评分规则])\n"
|
| 432 |
+
"6.握弓手的手指是否放松,以在放箭之后,弓箭是否自然向前转动为标准(R:推弓手手指放松/W:手指僵直或握紧)\n"
|
| 433 |
+
"7.后手在未放箭的瞄准阶段是否有松弦现象(一般都给出R:瞄准时后手没有松滑弦 的评价/过于离谱给出W:瞄准时后手松滑弦)\n"
|
| 434 |
+
"8.双脚开步与肩同宽(R:双脚开步与肩同宽(基本满足,足部分数基本5分)/W:双脚随意站立(出现了,评价也不应低于三分))\n"
|
| 435 |
+
"9.放箭时后手是否在箭射出时向后延展(随动)(R:撒放时后手顺势向后延展/W:撒放时后手定在原地或向外,向前)\n"
|
| 436 |
+
"10.放箭时前手是否放松推弓(箭射出后握弓手保持放松,弓自然转动)(R:撒放时前手放松推弓/W:撒放时前手握弓)\n"
|
| 437 |
+
"随后按要求输出JSON,给出初步评分与关键词化comment(关键词应精炼,必须使用以下的表达(符合哪项表达就使用哪项表达))\n"
|
| 438 |
+
"评分规则:\n"
|
| 439 |
+
"[手部评分规则]:三条标准:靠下巴(手指必须盖住下巴的一部分,牢牢贴合才算贴合,否则就是没有贴合),撒放向后移动,推弓(撒放后弓必须自然转动)这三条完全满足5分,少一条扣一分,第一条请在给出的第一帧中判断,其余两条请在后面撒放的关键帧序列中比对手部位置差异来判断"
|
| 440 |
+
),
|
| 441 |
+
}
|
| 442 |
+
|
| 443 |
+
system_metrics_rubric_map = {
|
| 444 |
+
"en": (
|
| 445 |
+
"\nMetrics rubric (use ONLY for judgment):\n"
|
| 446 |
+
"Do NOT include any metric numeric values OR threshold numbers in 'comment'; 'comment' must contain no numbers.\n"
|
| 447 |
+
"(1) Hand-Shoulder-Elbow Angle: >=175.67 Excellent (arm=5, MUST include a positive keyword about arm straightness); "
|
| 448 |
+
"172.26-175.67 Good (arm=4, MUST include a positive keyword); "
|
| 449 |
+
"128.64-172.26 Average (if >=150 then arm=3, otherwise arm=2, MUST include a negative keyword about arm straightness); "
|
| 450 |
+
"<128.64 Poor (arm=1-2, MUST include a negative keyword).\n"
|
| 451 |
+
"(2) Hand-to-Chin Distance (two-sided target range): 232.57-274.02 Excellent "
|
| 452 |
+
"(if neither 'follow-through rear hand extends backward' nor 'bow hand relaxes and pushes' appears, hand=3; "
|
| 453 |
+
"if exactly one appears, hand=4; if both appear, hand=5; MUST include a positive keyword about hand anchored at chin; "
|
| 454 |
+
"include other hand keywords if applicable); "
|
| 455 |
+
"228.58-232.57 OR 274.02-278.02 Average "
|
| 456 |
+
"(if neither of the two release actions appears, hand=2; if one or both appear, hand=3; MUST include a negative keyword about hand anchored at chin; "
|
| 457 |
+
"include other hand keywords if applicable); "
|
| 458 |
+
"otherwise assign hand=0/1 as appropriate and use ONLY negative keywords.\n"
|
| 459 |
+
"(3) X-difference of two midpoints (two-sided target range): 61.95-87.05 Excellent "
|
| 460 |
+
"(if 68-80 then torso=5, else torso=4; MUST include ALL positive torso keywords); "
|
| 461 |
+
"53.54-61.95 OR 87.05-95.46 Good (torso=3; may include negative keywords; MUST NOT include any positive torso keywords); "
|
| 462 |
+
"45.99-53.54 OR 95.46-103.00 Average (torso=2; MUST include negative keywords); "
|
| 463 |
+
"<45.99 OR >103.00 Poor (torso=0/1; MUST include negative keywords)."
|
| 464 |
+
),
|
| 465 |
+
"zh": (
|
| 466 |
+
"\n[生物力学特征信息](判断特定部位动作质量的关键依据,请严格根据指标数值和下面的规则修正你的评分和评估文本.下文中要给出的评价请均在词库中提取):\n"
|
| 467 |
+
"不要在 comment 中出现任何指标数值或阈值数字;comment 中不得出现数字.\n"
|
| 468 |
+
"(1)max_angle_avg:>=175.67 优秀(手臂部位5分,必须给出有关手臂伸直的正面评价(十分接近这个值也认定为优秀));172.26-175.67 良好(4分,同样必须给出正面评价);"
|
| 469 |
+
"128.64-172.26 中等(大于150度3分,小于且在区间内给2分,必须给出有关手臂伸直的负面评价);<128.64 较差(1-2,同样必须给出负面评价).\n"
|
| 470 |
+
# "(2)min_dist_avg:232.57-274.02 优秀(如果没有向后延展和推弓动作则手部为3分,具备一个4分,两个都具备5分,一定给出手贴下颌的正面评价,其他评价若具备则给出);"
|
| 471 |
+
# "228.58-232.57 或 274.02-278.02 一般(如果没有向后延展和推弓动作则手部为2分,具备一个/两个3分,一定给出手贴下颌的负面评价,其他评价若具备则给出);"
|
| 472 |
+
# "其余情况酌情给0/1分,评价均为负面评价.\n"
|
| 473 |
+
# "(3)min_x_diff_avg:61.95-87.05 优秀(68-80躯干部位5分,否则4分,必须给出全部的躯干正面评价);"
|
| 474 |
+
# "53.54-61.95 或 87.05-95.46 良好(躯干部位3分,可酌情给出负面评价,一定不出现正面评价);"
|
| 475 |
+
# "45.99-53.54 或 95.46-103.00 中等(2分,一定给出负面评价);<45.99 或 >103.00 较差(0/1分,一定给出负面评价)."
|
| 476 |
+
),
|
| 477 |
+
}
|
| 478 |
+
|
| 479 |
+
system_model_map = {
|
| 480 |
+
"en": (
|
| 481 |
+
""
|
| 482 |
+
),
|
| 483 |
+
"zh": (
|
| 484 |
+
"\n[模范]模板动作关键帧序列(请根据模范动作对你看到的动作进行严格对比评估(尤其注意头部和手部的结论更正),修正你的评估文本各部位及总体评分(例如和模范动作对比后发现动作评估有错误,就修改该错误并将对应部位评分提高,反之降低).模范动作每个部位及总体的动作质量均为满分)\n"
|
| 485 |
+
"注意,一定要仔细审查头部,手部,手臂的动作特点是否和模范动作完全相同再做出修正判断(手臂要保持直线,手部要靠位正确,有后延和推弓动作才可以满分,必须严格遵守前文提到的[评分规则])"
|
| 486 |
+
),
|
| 487 |
+
}
|
| 488 |
+
|
| 489 |
+
if pipeline == 1:
|
| 490 |
+
system_prompt = system_base_map[lang] + system_task_video_map[lang]
|
| 491 |
+
|
| 492 |
+
video_base64 = video_to_base64(video_path)
|
| 493 |
+
completion = client.chat.completions.create(
|
| 494 |
+
model=model_name,
|
| 495 |
+
messages=[
|
| 496 |
+
{
|
| 497 |
+
"role": "system",
|
| 498 |
+
"content": system_prompt,
|
| 499 |
+
},
|
| 500 |
+
{
|
| 501 |
+
"role": "user",
|
| 502 |
+
"content": [
|
| 503 |
+
{
|
| 504 |
+
"type": "video_url",
|
| 505 |
+
"video_url": {
|
| 506 |
+
"url": f"data:video/mp4;base64,{video_base64}"
|
| 507 |
+
},
|
| 508 |
+
},
|
| 509 |
+
],
|
| 510 |
+
}
|
| 511 |
+
],
|
| 512 |
+
stream=True,
|
| 513 |
+
)
|
| 514 |
+
|
| 515 |
+
reasoning_content = ""
|
| 516 |
+
answer_content = ""
|
| 517 |
+
is_answering = False
|
| 518 |
+
for chunk in completion:
|
| 519 |
+
if not getattr(chunk, "choices", None):
|
| 520 |
+
continue
|
| 521 |
+
delta = chunk.choices[0].delta
|
| 522 |
+
if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None:
|
| 523 |
+
print(delta.reasoning_content, end='', flush=True)
|
| 524 |
+
reasoning_content += delta.reasoning_content
|
| 525 |
+
continue
|
| 526 |
+
if getattr(delta, "content", None):
|
| 527 |
+
if delta.content != "" and is_answering is False:
|
| 528 |
+
print("\n" + "=" * 20 + "Complete Response" + "=" * 20 + "\n")
|
| 529 |
+
is_answering = True
|
| 530 |
+
print(delta.content, end='', flush=True)
|
| 531 |
+
answer_content += delta.content
|
| 532 |
+
return _parse_assessment_payload(answer_content)
|
| 533 |
+
|
| 534 |
+
# pipeline 2/3/4: keyframes extraction
|
| 535 |
+
data, normalized_data = Keypoint_Extract(
|
| 536 |
+
video_path,
|
| 537 |
+
show_draw_selection=show,
|
| 538 |
+
display_wait=1,
|
| 539 |
+
image_width=1920,
|
| 540 |
+
image_height=1080,
|
| 541 |
+
draw_math_feature_points=show,
|
| 542 |
+
)
|
| 543 |
+
base_keyframes = extract_keyframes_with_ruptures_poseparts_2d(normalized_data, k=target_k + 3)
|
| 544 |
+
key_frame_lists = refine_keyframes_with_absdiff(
|
| 545 |
+
video_path=video_path,
|
| 546 |
+
keyframe_result=base_keyframes,
|
| 547 |
+
k=target_k,
|
| 548 |
+
)
|
| 549 |
+
keyframes_dict = extract_show_keyframes_by_index(video_path, key_frame_lists, show=show)
|
| 550 |
+
keyframe_image_items = list(keyframes_dict.get("openai_input_images", []) or [])
|
| 551 |
+
if not keyframe_image_items:
|
| 552 |
+
raise ValueError("No keyframes extracted; pipeline 2/3/4 requires non-empty keyframes.")
|
| 553 |
+
|
| 554 |
+
if pipeline == 2:
|
| 555 |
+
system_prompt = system_base_map[lang] + system_task_keyframes_map[lang]
|
| 556 |
+
ordering_note = (
|
| 557 |
+
"Keyframes are ordered from early to late."
|
| 558 |
+
if lang == "en"
|
| 559 |
+
else "关键帧按时间从早到晚排序."
|
| 560 |
+
)
|
| 561 |
+
raw_user_content = [{"type": "input_text", "text": ordering_note}] + keyframe_image_items
|
| 562 |
+
chat_user_content = _to_chat_content(raw_user_content)
|
| 563 |
+
resp = client.chat.completions.create(
|
| 564 |
+
model=model_name,
|
| 565 |
+
messages=[
|
| 566 |
+
{"role": "system", "content": system_prompt},
|
| 567 |
+
{"role": "user", "content": chat_user_content},
|
| 568 |
+
],
|
| 569 |
+
)
|
| 570 |
+
return _parse_assessment_payload(_chat_completion_output_text(resp))
|
| 571 |
+
|
| 572 |
+
math_feature = cal_math_features(keypoints_data=data,plot_angle_curve=show)
|
| 573 |
+
metrics = extract_action_features(math_feature)
|
| 574 |
+
system_prompt = system_base_map[lang] + system_task_keyframes_map[lang]
|
| 575 |
+
if show == True:
|
| 576 |
+
print("max_angle_avg:", metrics.get("max_angle_avg"))
|
| 577 |
+
print("min_dist_avg:", metrics.get("min_dist_avg"))
|
| 578 |
+
print("min_x_diff_avg:", metrics.get("min_x_diff_avg"))
|
| 579 |
+
for k in ("max_angle_avg", "min_dist_avg", "min_x_diff_avg"):
|
| 580 |
+
if k not in metrics:
|
| 581 |
+
raise ValueError(f"Missing metric: {k}")
|
| 582 |
+
|
| 583 |
+
ordering_note = (
|
| 584 |
+
"Keyframes are ordered from early to late."
|
| 585 |
+
if lang == "en"
|
| 586 |
+
else "关键帧按时间从早到晚排序."
|
| 587 |
+
)
|
| 588 |
+
metrics_header = "\n\nMetrics:\n" if lang == "en" else "\n\n指标:\n"
|
| 589 |
+
metrics_text = (
|
| 590 |
+
metrics_header
|
| 591 |
+
+ f"- max_angle_avg: {metrics['max_angle_avg']}\n"
|
| 592 |
+
# + f"- min_dist_avg: {metrics['min_dist_avg']}\n"
|
| 593 |
+
# + f"- min_x_diff_avg: {metrics['min_x_diff_avg']}"
|
| 594 |
+
)
|
| 595 |
+
if pipeline == 3:
|
| 596 |
+
p3_system_prompt = system_prompt + system_metrics_rubric_map[lang]
|
| 597 |
+
user_text = ordering_note #+ metrics_text
|
| 598 |
+
raw_user_content = [{"type": "input_text", "text": user_text}] + keyframe_image_items
|
| 599 |
+
chat_user_content = _to_chat_content(raw_user_content)
|
| 600 |
+
resp = client.chat.completions.create(
|
| 601 |
+
model=model_name,
|
| 602 |
+
messages=[
|
| 603 |
+
{"role": "system", "content": p3_system_prompt},
|
| 604 |
+
{"role": "user", "content": chat_user_content},
|
| 605 |
+
],
|
| 606 |
+
)
|
| 607 |
+
return _parse_assessment_payload(_chat_completion_output_text(resp))
|
| 608 |
+
|
| 609 |
+
if pipeline == 4:
|
| 610 |
+
repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
|
| 611 |
+
default_template_dir = os.path.join(repo_root, "output_keyframes")
|
| 612 |
+
template_dir = template_keyframes_dir if template_keyframes_dir else default_template_dir
|
| 613 |
+
if not os.path.isabs(template_dir):
|
| 614 |
+
template_dir = os.path.abspath(os.path.join(repo_root, template_dir))
|
| 615 |
+
template_keyframe_items = _load_openai_images_from_folder(template_dir)
|
| 616 |
+
|
| 617 |
+
pipeline4_system_prompt = system_prompt + str(system_model_map.get(lang, "") or "") #+ system_metrics_rubric_map[lang]
|
| 618 |
+
if lang == "en":
|
| 619 |
+
student_note = "Student keyframes are ordered from early to late." #+ metrics_text
|
| 620 |
+
template_note = "Template keyframes are ordered from early to late."
|
| 621 |
+
else:
|
| 622 |
+
student_note = "学生关键帧按时间从早到晚排序." #+ metrics_text
|
| 623 |
+
template_note = "模范关键帧按时间从早到晚排序."
|
| 624 |
+
|
| 625 |
+
raw_user_content = (
|
| 626 |
+
[{"type": "input_text", "text": student_note}]
|
| 627 |
+
+ keyframe_image_items
|
| 628 |
+
+ [{"type": "input_text", "text": template_note}]
|
| 629 |
+
+ template_keyframe_items
|
| 630 |
+
)
|
| 631 |
+
chat_user_content = _to_chat_content(raw_user_content)
|
| 632 |
+
resp = client.chat.completions.create(
|
| 633 |
+
model=model_name,
|
| 634 |
+
messages=[
|
| 635 |
+
{"role": "system", "content": pipeline4_system_prompt},
|
| 636 |
+
{"role": "user", "content": chat_user_content},
|
| 637 |
+
],
|
| 638 |
+
)
|
| 639 |
+
return _parse_assessment_payload(_chat_completion_output_text(resp))
|
| 640 |
+
|
| 641 |
+
# Canonical body-part keys mapped to the English and Chinese aliases used to
# recognize that part inside model-generated keywords and user questions.
# Matching elsewhere is done on a normalized form (lowercased, with
# whitespace/underscore/hyphen separators removed), so aliases here may be
# written in any natural spelling.
_PART_ALIASES = {
    "head": ["head", "neck", "头", "头部", "颈", "颈部"],
    "hand": ["hand", "hands", "palm", "fingers", "wrist", "手", "手部", "手指", "手腕", "后手", "前手", "握弓手", "推弓手"],
    "arm": ["arm", "arms", "elbow", "shoulder", "forearm", "upperarm", "手臂", "臂", "肩", "肩部", "肘", "肘部", "小臂", "大臂", "共线", "一条线", "直线"],
    "torso": ["torso", "body", "trunk", "core", "posture", "躯干", "身体", "中轴", "重心", "背部", "上身"],
    "foot": ["foot", "feet", "leg", "stance", "脚", "足", "足部", "双脚", "步态", "站姿", "开步", "与肩同宽"],
}
|
| 648 |
+
|
| 649 |
+
def _normalize_language_qa(language: str) -> str:
|
| 650 |
+
language_norm = str(language or "en").strip().lower()
|
| 651 |
+
if language_norm in ("zh", "zh-cn", "zh_hans", "cn", "chinese", "中文"):
|
| 652 |
+
return "zh"
|
| 653 |
+
if language_norm in ("en", "en-us", "english"):
|
| 654 |
+
return "en"
|
| 655 |
+
return "zh"
|
| 656 |
+
|
| 657 |
+
|
| 658 |
+
def _normalize_text_for_match(text: str) -> str:
|
| 659 |
+
return re.sub(r"[\s_\-]+", "", str(text or "").strip().lower())
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
def _normalize_part_name(part: str):
    """Resolve *part* (any alias, English or Chinese) to its canonical body-part key, or None."""
    target = _normalize_text_for_match(part)
    for canonical, aliases in _PART_ALIASES.items():
        if any(target == _normalize_text_for_match(alias) for alias in aliases):
            return canonical
    return None
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
def _sanitize_question_text(question: str) -> str:
|
| 672 |
+
text = str(question or "")
|
| 673 |
+
text = re.sub(r"[^A-Za-z0-9\u4e00-\u9fff\s]", " ", text)
|
| 674 |
+
text = re.sub(r"\s+", " ", text).strip()
|
| 675 |
+
if not text:
|
| 676 |
+
raise ValueError("Question is empty after sanitization.")
|
| 677 |
+
return text
|
| 678 |
+
|
| 679 |
+
|
| 680 |
+
def _extract_json_object_text(raw_text: str) -> str:
|
| 681 |
+
text = str(raw_text or "").strip()
|
| 682 |
+
if not text:
|
| 683 |
+
raise ValueError("LLM output is empty.")
|
| 684 |
+
decoder = json.JSONDecoder()
|
| 685 |
+
for i, ch in enumerate(text):
|
| 686 |
+
if ch != "{":
|
| 687 |
+
continue
|
| 688 |
+
try:
|
| 689 |
+
parsed, end = decoder.raw_decode(text[i:])
|
| 690 |
+
except json.JSONDecodeError:
|
| 691 |
+
continue
|
| 692 |
+
if isinstance(parsed, dict):
|
| 693 |
+
return text[i : i + end]
|
| 694 |
+
raise ValueError("No valid JSON object found in LLM output.")
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
def _chat_completion_output_text(resp) -> str:
|
| 698 |
+
if resp is None or not getattr(resp, "choices", None):
|
| 699 |
+
raise ValueError("Chat completion returned empty response.")
|
| 700 |
+
message = getattr(resp.choices[0], "message", None)
|
| 701 |
+
content = getattr(message, "content", None)
|
| 702 |
+
if isinstance(content, str):
|
| 703 |
+
return content.strip()
|
| 704 |
+
if isinstance(content, list):
|
| 705 |
+
parts = []
|
| 706 |
+
for item in content:
|
| 707 |
+
if isinstance(item, dict):
|
| 708 |
+
txt = item.get("text")
|
| 709 |
+
if txt:
|
| 710 |
+
parts.append(str(txt))
|
| 711 |
+
else:
|
| 712 |
+
txt = getattr(item, "text", None)
|
| 713 |
+
if txt:
|
| 714 |
+
parts.append(str(txt))
|
| 715 |
+
return "".join(parts).strip()
|
| 716 |
+
return str(content or "").strip()
|
| 717 |
+
|
| 718 |
+
|
| 719 |
+
def _split_question_with_llm(question_clean: str, language: str, model: str) -> dict:
    """Split a user question into technical vs. knowledge parts via an LLM call.

    Sends *question_clean* to a DashScope-hosted chat model (network I/O;
    requires the ALI_API_KEY environment variable) and parses the JSON
    object it returns.

    Args:
        question_clean: sanitized question text.
        language: language tag; normalized to 'en'/'zh' (default 'zh').
        model: chat model name passed straight to the API.

    Returns:
        dict with keys:
            technical_part: str — the video-technique portion of the question.
            knowledge_part: str — the general archery-knowledge portion.
            technical_scope: str — one of 'overall'/'parts'/'none'
                (anything else from the model is coerced to 'none').
            target_parts: list[str] — canonical body-part keys, deduplicated.

    Raises:
        ValueError: if the model output is empty, holds no JSON object,
            or the JSON is not an object.
    """
    lang = _normalize_language_qa(language)
    # DashScope's OpenAI-compatible endpoint; key comes from the environment.
    client = OpenAI(
        api_key=os.getenv("ALI_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    system_prompt_map = {
        "zh": (
            "你是一个专业的问答问题拆分器,面向“视频中人的射箭动作分析”场景."
            "你的任务是把用户问题字符串整理提炼为后续大模型可以正确理解的两类内容并识别下文提到的两个特征,并返回JSON对象字符串.\n"
            "JSON字段固定为technical_part(内容1), knowledge_part(内容2), technical_scope(特征1), target_parts(特征2)\n"
            "1)technical_part:与视频中人物射箭动作技术相关的提问(字符串)\n"
            "2)knowledge_part:与射箭相关的通用知识内容(如规则、器材、术语解释、训练方法原理、比赛常识等)(字符串).\n"
            "特征1的所有可能结果为:overall/parts/none.判断准则如下:\n"
            "- overall:技术问题主要针对整体动作或完整流程.\n"
            "- parts:技术问题主要针对具体身体部位.\n"
            "- none:问题不涉及视频动作技术.\n"
            "如特征1的结果为parts,则对特征2进行识别,找出技术问题主要针对下列哪个或哪些身体部位:head/hand/arm/torso/foot,结果应该为列表.若特征1为其他结果则返回空列表."
            "根据问题指向选择一个或多个部位;否则 target_parts 为空列表."
        ),
        "en": (
            "You are a QA question splitter. "
            "Output JSON object string only, with keys: technical_part, knowledge_part, technical_scope, target_parts. "
            "technical_scope must be one of overall/parts/none. "
            "target_parts can only include head/hand/arm/torso/foot. "
            "Use empty string/list if a field is not applicable."
        ),
    }
    user_prompt_map = {
        "zh": f"问题:{question_clean}\n",
        "en": f"Question: {question_clean}\nSplit the question and return JSON only.",
    }

    # Low temperature keeps the split output close to deterministic.
    resp = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt_map[lang]},
            {"role": "user", "content": user_prompt_map[lang]},
        ],
        temperature=0.1,
    )
    raw_text = _chat_completion_output_text(resp)
    payload = json.loads(_extract_json_object_text(raw_text))
    if not isinstance(payload, dict):
        raise ValueError("Question split output is not a JSON object.")

    # Defensive normalization: missing/None fields become empty strings.
    technical_part = str(payload.get("technical_part", "") or "").strip()
    knowledge_part = str(payload.get("knowledge_part", "") or "").strip()
    technical_scope = str(payload.get("technical_scope", "none") or "none").strip().lower()
    if technical_scope not in {"overall", "parts", "none"}:
        technical_scope = "none"

    target_parts_raw = payload.get("target_parts", [])
    if not isinstance(target_parts_raw, list):
        target_parts_raw = []
    target_parts = []
    # Map model-provided part names/aliases to canonical keys, dropping
    # unknowns and duplicates while preserving order.
    for part in target_parts_raw:
        canon_part = _normalize_part_name(str(part))
        if canon_part and canon_part not in target_parts:
            target_parts.append(canon_part)
    return {
        "technical_part": technical_part,
        "knowledge_part": knowledge_part,
        "technical_scope": technical_scope,
        "target_parts": target_parts,
    }
|
| 785 |
+
|
| 786 |
+
def _map_keyword_to_part(keyword: str, language: str):
    """Map a free-text keyword to the body part whose aliases it best matches.

    Counts substring occurrences of every alias of every part inside the
    normalized keyword. The winning part is the one with the highest total
    hit count; ties are broken by the earliest alias position in the
    keyword, and remaining ties by the fixed part_order below (earlier
    entries win, since later parts only replace on a strict improvement).

    Returns the canonical part name, or None when nothing matches.
    """
    # Language is normalized only for consistency with the other QA
    # helpers; the matching itself is language-agnostic.
    _ = _normalize_language_qa(language)
    keyword_norm = _normalize_text_for_match(keyword)
    if not keyword_norm:
        return None

    # Fixed priority order used as the final tie-breaker.
    part_order = ["arm", "hand", "torso", "head", "foot"]
    best_part = None
    best_count = 0
    best_first_pos = None

    for part in part_order:
        part_count = 0
        part_first_pos = None
        for alias in _PART_ALIASES.get(part, []):
            alias_norm = _normalize_text_for_match(alias)
            if not alias_norm:
                continue
            hit_count = keyword_norm.count(alias_norm)
            if hit_count <= 0:
                continue
            part_count += hit_count
            # Track the earliest position any alias of this part appears at.
            hit_pos = keyword_norm.find(alias_norm)
            if hit_pos >= 0 and (part_first_pos is None or hit_pos < part_first_pos):
                part_first_pos = hit_pos

        if part_count <= 0:
            continue

        # First part with any hits becomes the provisional best.
        if best_part is None:
            best_part = part
            best_count = part_count
            best_first_pos = part_first_pos
            continue

        # Strictly more hits wins outright.
        if part_count > best_count:
            best_part = part
            best_count = part_count
            best_first_pos = part_first_pos
            continue

        # Equal hits: the part whose alias appears earliest wins.
        if part_count == best_count:
            current_pos = part_first_pos if part_first_pos is not None else 10**9
            best_pos = best_first_pos if best_first_pos is not None else 10**9
            if current_pos < best_pos:
                best_part = part
                best_first_pos = part_first_pos

    if best_part:
        return best_part
    return None
|
| 837 |
+
|
| 838 |
+
def _filter_keywords_for_parts(keywords: list[str], parts: list[str], language: str) -> list[str]:
    """Keep only the keywords that map to one of the requested body parts.

    Part names are canonicalized via ``_normalize_part_name`` and
    de-duplicated while preserving order; if none survive, an empty list is
    returned. Keyword order is preserved and duplicates are dropped.
    """
    wanted = []
    for raw_part in parts:
        canon = _normalize_part_name(raw_part)
        if canon and canon not in wanted:
            wanted.append(canon)
    if not wanted:
        return []

    kept = []
    seen = set()
    for raw_kw in keywords:
        kw = str(raw_kw).strip()
        if not kw or kw in seen:
            continue
        # A keyword survives only when its mapped part is one of the targets.
        if _map_keyword_to_part(kw, language) in wanted:
            kept.append(kw)
            seen.add(kw)
    return kept
|
| 858 |
+
|
| 859 |
+
|
| 860 |
+
# def _retrieve_by_keywords(
|
| 861 |
+
# keywords: list[str],
|
| 862 |
+
# session,
|
| 863 |
+
# embedding_model: str,
|
| 864 |
+
# top_k_per_keyword: int,
|
| 865 |
+
# ) -> list[str]:
|
| 866 |
+
# from RAG.Knowledge_Database.AI_dbmanager import KnowledgeDB
|
| 867 |
+
|
| 868 |
+
# db = KnowledgeDB(session=session)
|
| 869 |
+
# snippets = []
|
| 870 |
+
# seen = set()
|
| 871 |
+
# for kw in keywords:
|
| 872 |
+
# kw_text = str(kw).strip()
|
| 873 |
+
# if not kw_text:
|
| 874 |
+
# continue
|
| 875 |
+
# try:
|
| 876 |
+
# search_results = db.search(
|
| 877 |
+
# kw_text,
|
| 878 |
+
# embed_fn=get_embedding,
|
| 879 |
+
# model_name=embedding_model,
|
| 880 |
+
# top_k=top_k_per_keyword,
|
| 881 |
+
# )
|
| 882 |
+
# except Exception as e:
|
| 883 |
+
# print(f"[QA] keyword retrieval failed for '{kw_text}': {e}")
|
| 884 |
+
# continue
|
| 885 |
+
|
| 886 |
+
# for result in search_results:
|
| 887 |
+
# chunk = result[0] if isinstance(result, tuple) else result
|
| 888 |
+
# chunk_text = getattr(chunk, "text", None)
|
| 889 |
+
# if chunk_text is None and isinstance(chunk, str):
|
| 890 |
+
# chunk_text = chunk
|
| 891 |
+
# chunk_text = str(chunk_text or "").strip()
|
| 892 |
+
# if not chunk_text or chunk_text in seen:
|
| 893 |
+
# continue
|
| 894 |
+
# snippets.append(chunk_text)
|
| 895 |
+
# seen.add(chunk_text)
|
| 896 |
+
# return snippets
|
| 897 |
+
|
| 898 |
+
|
| 899 |
+
def _retrieve_knowledge_by_question(
    question_text: str,
    session,
    embedding_model: str,
    title_top_k: int,
    chunk_top_k: int,
) -> list[str]:
    """Run the two-stage (title, then chunk) knowledge search for a question.

    Returns a de-duplicated list of snippet texts in retrieval order; an
    empty list when the search raises (failure is logged, not propagated).
    """
    from RAG.Knowledge_Database.AI_dbmanager import KnowledgeDB

    query = str(question_text or "").strip()
    try:
        results = KnowledgeDB(session=session).search_knowledge_two_stage(
            query,
            embed_fn=get_embedding,
            model_name=embedding_model,
            title_top_k=title_top_k,
            chunk_top_k=chunk_top_k,
        )
    except Exception as e:
        print(f"[QA] knowledge retrieval failed: {e}")
        return []

    # Results may be (chunk, score) tuples, objects with a .text attribute,
    # or plain strings — normalize all of them to stripped text.
    unique_snippets = []
    seen = set()
    for hit in results:
        payload = hit[0] if isinstance(hit, tuple) else hit
        text = getattr(payload, "text", None)
        if text is None and isinstance(payload, str):
            text = payload
        text = str(text or "").strip()
        if text and text not in seen:
            seen.add(text)
            unique_snippets.append(text)
    return unique_snippets
|
| 934 |
+
|
| 935 |
+
def _compress_eval_text(
    eval_text: str,
    scope: str,
    target_parts: list[str],
    language: str,
    model: str,
) -> str:
    """Compress a raw evaluation text into a 1-2 sentence action summary via an LLM.

    Args:
        eval_text: Full upstream evaluation text; empty input short-circuits to "".
        scope: "parts" extracts only content about ``target_parts``; any other
            value is treated as an overall-summary request.
        target_parts: Requested body parts; canonicalized with
            ``_normalize_part_name`` and de-duplicated.
        language: Language tag, normalized to "zh"/"en" by ``_normalize_language_qa``.
        model: Chat-completion model name.

    Returns:
        The LLM summary when available; otherwise "" for a parts-scope request
        (no relevant content) or the first 220 characters of the original text.
    """
    text = str(eval_text or "").strip()
    if not text:
        return ""

    scope_norm = str(scope or "overall").strip().lower()
    # Canonicalize and de-duplicate the requested parts, preserving order.
    target_part_list = []
    for part in target_parts or []:
        canon_part = _normalize_part_name(part)
        if canon_part and canon_part not in target_part_list:
            target_part_list.append(canon_part)
    target_part_text = ", ".join(target_part_list) if target_part_list else "(none)"

    lang = _normalize_language_qa(language)
    # DashScope's OpenAI-compatible endpoint; key is read from the environment.
    client = OpenAI(
        api_key=os.getenv("ALI_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    # Prompts instruct the model to extract (parts scope) or summarize
    # (overall scope) without inventing facts, and to return plain text.
    system_prompt_map = {
        "zh": (
            "你是射箭评估文本提取与语言修缮助手.\n"
            "若 scope=parts: 只提取与 target_parts 对应部位相关的评价信息,忽略其他部位.\n"
            "若 scope=overall: 提炼整体动作评价信息.\n"
            "将结果压缩成1-2句动作要点,不要添加原文之外的信息.\n"
            "若 scope=parts 且原文无对应部位信息,返回空字符串.\n"
            "输出仅为纯文本."
        ),
        "en": (
            "You are an archery-evaluation extraction and summarization assistant.\n"
            "If scope=parts: extract ONLY evaluation content related to target_parts and ignore other parts.\n"
            "If scope=overall: summarize the overall action evaluation.\n"
            "Compress into 1-2 short action-focused sentences without adding extra facts.\n"
            "If scope=parts and no relevant information exists, return an empty string.\n"
            "Output plain text only."
        ),
    }
    user_prompt_map = {
        "zh": (
            f"scope: {scope_norm}\n"
            f"target_parts: {target_part_text}\n"
            f"原文: {text}\n"
        ),
        "en": (
            f"scope: {scope_norm}\n"
            f"target_parts: {target_part_text}\n"
            f"source: {text}\n"
            "Extract first, then summarize."
        ),
    }
    try:
        resp = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt_map[lang]},
                {"role": "user", "content": user_prompt_map[lang]},
            ],
            temperature=0.2,  # low temperature: extraction, not creative writing
        )
        summary = _chat_completion_output_text(resp).strip()
        if summary:
            return summary
    except Exception as e:
        print(f"[QA] eval text compression failed: {e}")

    # Fallbacks when the LLM call fails or returns an empty string:
    # parts scope promises "" on no-information; overall scope degrades to a
    # truncated slice of the original text.
    if scope_norm == "parts":
        return ""
    return text[:220]
|
| 1008 |
+
|
| 1009 |
+
|
| 1010 |
+
def _compose_qa_answer(
    question_tech: str,
    tech_summary: str,
    keywords: list[str],
    knowledge_snippets: list[str],
    language: str,
    tech_suggestions=None,
    knowledge_question: str = "",
    model: str = "qwen-plus",
) -> str:
    """Merge the technical analysis, improvement advice, and knowledge answer
    into one final QA response via a single LLM call.

    The prompt variant is chosen by which sub-questions exist:
    - tech + knowledge  -> three output lines,
    - tech only         -> two output lines,
    - knowledge only    -> one output line.
    On LLM failure a deterministic template-based fallback is returned.

    Args:
        question_tech: Technical sub-question ("" when absent).
        tech_summary: Compressed evaluation summary from ``_compress_eval_text``.
        keywords: Advice seed keywords (first 12 used).
        knowledge_snippets: Retrieved knowledge chunks (first 8 used).
        language: Normalized to "zh"/"en".
        tech_suggestions: Optional pre-built suggestions, used only by the fallback.
        knowledge_question: Knowledge sub-question ("" when absent).
        model: Chat-completion model name.
    """
    lang = _normalize_language_qa(language)
    tech_summary = str(tech_summary or "").strip()
    keywords = [str(k).strip() for k in (keywords or []) if str(k).strip()]
    tech_suggestions = [str(s).strip() for s in (tech_suggestions or []) if str(s).strip()]
    knowledge_question = str(knowledge_question or "").strip()
    knowledge_snippets = [str(s).strip() for s in (knowledge_snippets or []) if str(s).strip()]

    client = OpenAI(
        api_key=os.getenv("ALI_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )
    keyword_text = ", ".join(keywords[:12]) if keywords else "(none)"
    suggestion_seed = ";".join(tech_suggestions[:8]) if tech_suggestions else "(none)"
    snippet_text = "\n".join([f"- {s}" for s in knowledge_snippets[:8]]) if knowledge_snippets else "- (empty)"

    if question_tech != "":
        if knowledge_question != "":
            # Both a technical and a knowledge sub-question: three modules.
            system_prompt_map = {
                "zh": (
                    "你是射箭问答整合助手.\n"
                    "你需要在一次回答中完成三个模块并最终拼接输出,保证语义一致性,逻辑严谨性和专业性:\n"
                    "1) 技术动作分析: 按照tech_summary给出当前针对部位的技术动作分析(尽可能使用原句进行分析)\n"
                    "2) 改进建议: 依据keywords对question_tech给出改进建议,可使用你的通用知识.\n"
                    "3) 知识问题回答: 仅依据knowledge_snippets中能够回答knowledge_question的片段进行回答"
                    ",不得引入 snippets 外信息,你的任务是将零碎的信息整合成段落.\n"
                    "若 snippets 不足,必须写“根据已检索片段无法确定”.\n"
                    "最后直接输出三行,格式固定为:\n"
                    "对您问题中涉及的技术动作进行分析:...\n针对不足之处的改进建议:...\n关于知识问题的回答:...\n"
                    "不要输出中间推理、不要分点编号、不要额外标题."
                ),
                "en": (
                    "You are an archery QA integrator.\n"
                    "In ONE response, complete three modules and merge them with semantic consistency:\n"
                    "1) Technical Action Analysis: based on tech_summary.\n"
                    "2) Improvement Advice: based on question + keywords; general knowledge is allowed.\n"
                    "3) Knowledge Answer: answer knowledge_question using ONLY knowledge_snippets.\n"
                    "If snippets are insufficient, you must output 'Cannot determine from retrieved snippets.'\n"
                    "Return exactly three lines:\n"
                    "Technical Action Analysis:...\nImprovement Advice:...\nKnowledge Answer:...\n"
                    "No extra headings or reasoning."
                ),
            }
        else:
            # Technical sub-question only: two modules.
            system_prompt_map = {
                "zh": (
                    "你是射箭问答整合助手.\n"
                    "你需要在一次回答中完成两个模块并最终拼接输出,保证语义一致性,逻辑严谨性和专业性:\n"
                    "1) 技术动作分析: 按照tech_summary给出当前针对部位的技术动作分析(尽可能使用原句进行分析)\n"
                    "2) 改进建议: 依据 question_tech + keywords 给出改进建议,可使用你的通用知识.\n"
                    ",不得引入snippets外信息,你的任务是将零碎的信息整合成段落.\n"
                    "若 snippets 不足,必须写“根据已检索片段无法确定”.\n"
                    "最后直接输出两行,格式固定为:\n"
                    "对您问题中涉及的技术动作进行分析:...\n针对不足之处的改进建议:...\n"
                    "不要输出中间推理、不要分点编号、不要额外标题."
                ),
                # NOTE(review): this English prompt looks copy-pasted from the
                # three-module variant — it lists three modules but asks for two
                # lines, unlike the zh prompt above. Confirm and align.
                "en": (
                    "You are an archery QA integrator.\n"
                    "In ONE response, complete three modules and merge them with semantic consistency:\n"
                    "1) Technical Action Analysis: based on tech_summary.\n"
                    "2) Improvement Advice: based on question + keywords; general knowledge is allowed.\n"
                    "3) Knowledge Answer: answer knowledge_question using ONLY knowledge_snippets.\n"
                    "If snippets are insufficient, you must output 'Cannot determine from retrieved snippets.'\n"
                    "Return exactly two lines:\n"
                    "Technical Action Analysis:...\nImprovement Advice:...\n"
                    "No extra headings or reasoning."
                ),
            }
    else:
        # Knowledge sub-question only: one module.
        system_prompt_map = {
            "zh": (
                "你是射箭问答整合助手.\n"
                "你需要在一次回答中完成一个模块并最终拼接输出,保证语义一致性,逻辑严谨性和专业性:\n"
                "1) 知识问题回答: 仅依据knowledge_snippets中能够回答knowledge_question的片段进行回答"
                ",不得引入snippets外信息,你的任务是将零碎的信息整合成段落.\n"
                "若 snippets 不足,必须写“根据已检索片段无法确定”.\n"
                "最后直接输出一行,格式固定为:\n"
                "关于知识问题的回答:\n"
                "不要输出中间推理、不要分点编号、不要额外标题."
            ),
            # NOTE(review): this English prompt is also a copy of the
            # three-module variant; the zh prompt requests a single
            # knowledge-answer line. Confirm and align.
            "en": (
                "You are an archery QA integrator.\n"
                "In ONE response, complete three modules and merge them with semantic consistency:\n"
                "1) Technical Action Analysis: based on tech_summary.\n"
                "2) Improvement Advice: based on question + keywords; general knowledge is allowed.\n"
                "3) Knowledge Answer: answer knowledge_question using ONLY knowledge_snippets.\n"
                "If snippets are insufficient, you must output 'Cannot determine from retrieved snippets.'\n"
                "Return exactly two lines:\n"
                "Technical Action Analysis:...\nImprovement Advice:...\n"
                "No extra headings or reasoning."
            ),
        }
    user_prompt_map = {
        "zh": (
            f"question_tech: {question_tech}\n"
            f"tech_summary: {tech_summary if tech_summary else '(none)'}\n"
            f"keywords: {keyword_text}\n"
            # f"tech_suggestions_seed: {suggestion_seed}\n"
            f"knowledge_question: {knowledge_question if knowledge_question else '(none)'}\n"
            f"knowledge_snippets:\n{snippet_text}\n"
            "请按系统要求一次性完成并拼接."
        ),
        "en": (
            f"question_tech: {question_tech}\n"
            f"tech_summary: {tech_summary if tech_summary else '(none)'}\n"
            f"keywords: {keyword_text}\n"
            # f"tech_suggestions_seed: {suggestion_seed}\n"
            f"knowledge_question: {knowledge_question if knowledge_question else '(none)'}\n"
            f"knowledge_snippets:\n{snippet_text}\n"
            "Complete and merge in one pass following the required format."
        ),
    }
    try:
        resp = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt_map[lang]},
                {"role": "user", "content": user_prompt_map[lang]},
            ],
            temperature=0.3,
        )
        merged = _chat_completion_output_text(resp).strip()
        if merged:
            return merged
    except Exception as e:
        print(f"[QA] one-shot compose failed: {e}")

    # Deterministic fallback: assemble the three fixed lines from whatever
    # inputs are available, without any LLM involvement.
    if lang == "zh":
        knowledge_fallback = "根据已检索片段无法确定." if knowledge_question else "无知识型问题."
        advice_fallback = (
            ";".join(tech_suggestions[:8])
            if tech_suggestions
            else "当前暂无建议库,可先围绕关键词逐项练习并复查动作稳定性."
        )
        return "\n".join(
            [
                f"技术动作分析:{tech_summary if tech_summary else '暂无可用技术动作摘要.'}",
                f"改进建议:{advice_fallback}",
                f"知识问题回答:{knowledge_fallback}",
            ]
        )

    knowledge_fallback = "Cannot determine from retrieved snippets." if knowledge_question else "No knowledge question."
    advice_fallback = (
        "; ".join(tech_suggestions[:8])
        if tech_suggestions
        else "No suggestion DB is available yet; focus on keyword-related drills and re-check movement stability."
    )
    return "\n".join(
        [
            f"Technical Action Analysis: {tech_summary if tech_summary else 'No technical summary available.'}",
            f"Improvement Advice: {advice_fallback}",
            f"Knowledge Answer: {knowledge_fallback}",
        ]
    )
|
| 1175 |
+
# TODO: finish the query logic
def answer_archery_question(
    keywords: list[str],
    evaluation_text: str,
    question: str,
    tech_session,
    knowledge_session,
    language: str = "zh",
    embedding_model: str = "ali-text-embedding-v4",
    classifier_model: str = "qwen-plus",
    summarizer_model: str = "qwen-plus",
    top_k_knowledge_title: int = 5,
    top_k_knowledge_chunks: int = 8,
    show: bool = False
) -> str:
    """
    Generate an archery QA reply from "technical evaluation context + static knowledge base".

    Pipeline overview:
    1) Normalize the language tag and the input question.
    2) Split the question with an LLM (technical detail / knowledge QA / scope).
    3) Summarize the evaluation text by scope to extract a technical-action summary.
    4) Vector-retrieve knowledge snippets for the knowledge sub-question.
    5) Generate improvement advice from keywords + answer the knowledge question
       from the retrieved snippets.
    6) Compose everything into the final natural-language answer.
    """
    try:
        # Normalize language and clean keywords so blank entries do not pollute retrieval.
        lang = _normalize_language_qa(language)  # returns "zh" / "en"
        clean_keywords = [str(k).strip() for k in (keywords or []) if str(k).strip()]
        try:
            # Basic sanitization of the user question (denoising / length & format guards);
            # strips special symbols and punctuation into spaces.
            question_clean = _sanitize_question_text(question)
        except Exception:
            return "问题为空或无有效内容,请提供具体问题." if lang == "zh" else "The question is empty after sanitization. Please provide a concrete question."

        # If question splitting fails, degrade to "treat everything as a technical question".
        split_fallback = {
            "technical_part": question_clean,
            "knowledge_part": "",
            "technical_scope": "overall",
            "target_parts": [],
        }
        try:
            split_result = _split_question_with_llm(
                question_clean=question_clean,
                language=lang,
                model=classifier_model,
            )
            if show == True:
                print(split_result)
        except Exception as e:
            print(f"[QA] question split failed, use fallback: {e}")
            split_result = split_fallback

        # Parse the split result and constrain the scope enum so malformed
        # output cannot derail the later branches.
        technical_part = str(split_result.get("technical_part", "") or "").strip()
        knowledge_part = str(split_result.get("knowledge_part", "") or "").strip()
        technical_scope = str(split_result.get("technical_scope", "none") or "none").strip().lower()
        if technical_scope not in {"overall", "parts", "none"}:
            technical_scope = "none"

        # Canonicalize and de-duplicate part names for per-part technical retrieval.
        target_parts_raw = split_result.get("target_parts", [])
        target_parts = []
        if isinstance(target_parts_raw, list):
            for part in target_parts_raw:
                canon_part = _normalize_part_name(str(part))  # match the correct name for each part
                if canon_part and canon_part not in target_parts:
                    target_parts.append(canon_part)

        tech_summary = ""
        tech_suggestions = None
        advice_keywords = clean_keywords

        if technical_part:
            if technical_scope == "parts" and target_parts:
                # Per-part question: prefer part-filtered keywords and extract
                # the evaluation summary scoped to those parts.
                scoped_keywords = _filter_keywords_for_parts(clean_keywords, target_parts, lang)
                if not scoped_keywords:
                    # Fall back to the raw keywords when filtering empties the
                    # set, so retrieval still has something to work with.
                    scoped_keywords = clean_keywords
                advice_keywords = scoped_keywords
                tech_summary = _compress_eval_text(
                    eval_text=evaluation_text,
                    scope="parts",
                    target_parts=target_parts,
                    language=lang,
                    model=summarizer_model,
                )
            else:
                # Overall technical question: whole-evaluation summary plus
                # advice generated from all keywords.
                tech_summary = _compress_eval_text(
                    eval_text=evaluation_text,
                    scope="overall",
                    target_parts=[],
                    language=lang,
                    model=summarizer_model,
                )
        knowledge_snippets = []
        if knowledge_part:
            # Knowledge sub-question is retrieved separately so it is not
            # conflated with the technical advice.
            knowledge_snippets = _retrieve_knowledge_by_question(
                question_text=knowledge_part,
                session=knowledge_session,
                embedding_model=embedding_model,
                title_top_k=top_k_knowledge_title,
                chunk_top_k=top_k_knowledge_chunks,
            )
            if show == True:
                print(knowledge_snippets)

        # Combine the technical summary, keyword advice, and snippet-grounded
        # knowledge answer into the final text.
        return _compose_qa_answer(
            question_tech=technical_part,
            tech_summary=tech_summary,
            keywords=advice_keywords,
            tech_suggestions=tech_suggestions,
            knowledge_question=knowledge_part,
            knowledge_snippets=knowledge_snippets,
            language=lang,
            model=summarizer_model,
        )

    except Exception as e:
        print(f"[QA] answer_archery_question failed: {e}")
        lang = _normalize_language_qa(language)
        if lang == "zh":
            return "问答处理失败,请稍后重试."
        return "QA processing failed. Please try again later."
|
| 1305 |
+
|
| 1306 |
+
def get_response(
    keywords: list[str],
    score_dict: dict,
    retrieved_snippets: list[str],
    language: str = "en",
) -> str:
    """
    Generate final assessment text from:
    - keyword sequence
    - preliminary score dictionary
    - retrieved database snippets

    The response is streamed from the model; chunks are echoed to stdout and
    accumulated, and the stripped full text is returned. On API failure an
    "Error: ..." string is returned instead of raising.

    Raises:
        ValueError: if ``language`` is not an en/zh variant, or an input has
            the wrong container type.
    """
    # client = OpenAI(
    #     base_url="https://openrouter.ai/api/v1",
    #     api_key=os.environ.get("OPENROUTER_API_KEY"),
    # )

    # DashScope's OpenAI-compatible endpoint; key comes from the environment.
    client = OpenAI(
        api_key=os.getenv("ALI_API_KEY"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
    )

    # Accept common en/zh aliases; reject anything else early.
    language_norm = str(language or "en").strip().lower()
    if language_norm in ("zh", "zh-cn", "zh_hans", "cn", "chinese", "中文"):
        lang = "zh"
    elif language_norm in ("en", "en-us", "english"):
        lang = "en"
    else:
        raise ValueError("language must be 'en' or 'zh'")

    if not isinstance(keywords, (list, tuple)):
        raise ValueError("keywords must be a list of strings")
    if not isinstance(score_dict, dict):
        raise ValueError("score_dict must be a dict")
    if not isinstance(retrieved_snippets, (list, tuple)):
        raise ValueError("retrieved_snippets must be a list of strings")

    def _normalize_snippet_item(item) -> str:
        # Snippets may arrive as strings or as dicts from the retriever; pull
        # out the first non-empty text-like field, else serialize the dict.
        if isinstance(item, str):
            return item.strip()
        if isinstance(item, dict):
            for key in ("text", "content", "chunk", "document", "comment"):
                value = item.get(key)
                if value:
                    return str(value).strip()
            return json.dumps(item, ensure_ascii=False)
        return str(item).strip()

    cleaned_keywords = [str(k).strip() for k in keywords if str(k).strip()]
    cleaned_snippets = [_normalize_snippet_item(s) for s in retrieved_snippets]
    cleaned_snippets = [s for s in cleaned_snippets if s]

    # System prompts constrain the model to the provided inputs and to a fixed
    # "correct actions / incorrect actions" output format.
    system_instruction_map = {
        "en": (
            "You are a professional archery evaluation coach.\n"
            "Use only user-provided inputs: keyword sequence, score dictionary, and retrieved DB snippets.\n"
            "Do not search or use external knowledge.\n"
            "Output must be plain assessment text only (not JSON, no markdown, no code block).\n"
            "Write a coherent professional summary and improvement suggestions.\n"
            "Use this format: This student's correct actions are: ... This student's incorrect actions are: ...\n"
            "Prefer retrieved DB snippets when semantically consistent with keywords and scores.\n"
            "Avoid contradictions and keep length around 550 characters."
        ),
        "zh": (
            "你是一名专业的射箭评估教练.\n"
            "只能使用用户提供的输入:关键词序列、评估分数字典、数据库检索片段.\n"
            "不得搜索或引用外部知识,你的任务是根据输入内容识别表述的一致性并进行筛选、对输入信息进行拼接、生成流畅的评估文本.\n"
            "输出必须是纯文本评估结果(不要 含有JSON、Markdown 或代码块,也不要含有特殊字符).\n"
            "写出连贯、专业的动作质量评估与改进建议.\n"
            "确保最终评估文本中不存在语义完全相反(自相矛盾)的语句后再进行输出.\n"
            "只做评估工作,不进行额外的解释或说明(根据数据库中语句......可得到等类似的措辞不要出现,也不要出现数据)\n"
            "行文格式必须为:这位同学的正确动作如下: ... 这位同学的错误动作如下: ...\n"
            "当数据库片段与关键词及分数字典语义一致时优先使用数据库片段表达,如不一致则使用关键词序列中的表达.\n"
            "文本整体长度控制在约 550 字符."
            "数据库检索得到的文本片段并非完全正确,请仔细甄别,只使用与关键词序列语义相同的关键词."
            "注意,射箭人的动作并非一定有缺点,如果关键词序列中无负面评价,“这位同学的错误动作如下:”冒号后不填写任何东西即可.\n"
        ),
    }

    system_instruction = system_instruction_map[lang]
    keyword_text = "\n".join([f"- {k}" for k in cleaned_keywords]) if cleaned_keywords else "- (empty)"
    snippet_text = "\n".join([f"- {s}" for s in cleaned_snippets]) if cleaned_snippets else "- (empty)"
    score_json = json.dumps(score_dict, ensure_ascii=False, indent=2)

    user_prompt_map = {
        "en": (
            "Keywords:\n"
            f"{keyword_text}\n\n"
            "Score dictionary(Head, hands, feet, arms, torso and overall movement quality assessment):\n"
            f"{score_json}\n\n"
            "Retrieved database snippets:\n"
            f"{snippet_text}\n\n"
            "Return only the final assessment text string."
        ),
        "zh": (
            "关键词序列:\n"
            f"{keyword_text}\n\n"
            "评估分数字典(头部,手部,足部,手臂,躯干及整体动作质量评分):\n"
            f"{score_json}\n\n"
            "数据库检索片段:\n"
            f"{snippet_text}\n\n"
            "只返回最终评估文本字符串."
        ),
    }

    messages = [
        {"role": "system", "content": system_instruction},
        {"role": "user", "content": user_prompt_map[lang]},
    ]

    answer_content = ""
    try:
        # Streamed completion: chunks are echoed live and accumulated.
        completion = client.chat.completions.create(
            model="qwen-plus",
            messages=messages,
            stream=True,
            temperature=0.7,
        )

        print("\n" + "=" * 20 + " Generating Assessment Text " + "=" * 20)
        is_answering = False

        for chunk in completion:
            delta = chunk.choices[0].delta

            # Reasoning-model "thinking" tokens are printed but not kept.
            if hasattr(delta, "reasoning_content") and delta.reasoning_content:
                if not is_answering:
                    print(f"\n[Thinking]: {delta.reasoning_content}", end="", flush=True)

            if hasattr(delta, "content") and delta.content:
                if not is_answering:
                    # First answer token: print a separator once.
                    print("\n" + "-" * 45)
                    is_answering = True
                print(delta.content, end="", flush=True)
                answer_content += delta.content

        print("\n" + "=" * 45)

    except Exception as e:
        print(f"API call error: {e}")
        return f"Error: {e}"

    return answer_content.strip()
|
| 1449 |
+
|
| 1450 |
+
def get_embedding_languagebind_text(texts):
    """Get text embeddings using the LanguageBind image-language tower.

    Args:
        texts: A string or list of strings to embed.

    Returns:
        A numpy array of language embeddings, one row per input text.

    Improvements over the original:
    - Falls back to CPU when CUDA is unavailable (the original hard-coded
      ``cuda:0`` and crashed on CPU-only hosts).
    - Caches the tokenizer and model on the function object so repeated calls
      do not reload weights from disk every time.
    """
    texts = texts if isinstance(texts, list) else [texts]
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Lazy singleton keyed by device string; rebuilt only if the device changes.
    cache = getattr(get_embedding_languagebind_text, "_cache", None)
    if cache is None or cache[2] != str(device):
        tokenizer = LanguageBindImageTokenizer.from_pretrained(
            'lb203/LanguageBind_Image',
            cache_dir='./cache_dir/tokenizer_cache_dir'
        )
        model = LanguageBind(clip_type={'image': 'LanguageBind_Image'},
                             cache_dir='./cache_dir').to(device)
        model.eval()
        get_embedding_languagebind_text._cache = (tokenizer, model, str(device))
    else:
        tokenizer, model, _ = cache

    inputs = {
        'language': to_device(tokenizer(texts, max_length=77,
                                        padding='max_length',
                                        truncation=True,
                                        return_tensors='pt'),
                              device)
    }

    with torch.no_grad():
        embeddings = model(inputs)['language']
    return embeddings.cpu().numpy()
|
| 1475 |
+
|
| 1476 |
+
def get_embedding_languagebind_video(video_path):
    """Get video embeddings using the LanguageBind video tower.

    Accepts a single path or a list of paths and returns a numpy array of
    video embeddings, one row per input clip.
    """
    paths = video_path if isinstance(video_path, list) else [video_path]
    clip_type = {'video': 'LanguageBind_Video_FT'}

    model = LanguageBind(clip_type=clip_type)
    # NOTE(review): relies on a module-level `device` defined elsewhere in
    # this file — confirm it exists before calling on a fresh import.
    model = model.to(device)
    model.eval()

    # Build the per-modality preprocessing transforms from the model config.
    modality_transform = {
        modality: transform_dict[modality](model.modality_config[modality])
        for modality in clip_type
    }

    inputs = {'video': to_device(modality_transform['video'](paths), device)}

    with torch.no_grad():
        embeddings = model(inputs)['video']
    return embeddings.cpu().numpy()
|
| 1494 |
+
|
| 1495 |
+
def construct_complex_prompt(scores: Dict, prompt: str, comment_text: List[str]):
    """
    Construct a complex prompt containing structured scores, retrieved comments, and user instructions.

    NOTE(review): the `prompt` parameter is currently unused by the body —
    confirm whether it should be appended to the returned text.
    """
    scores_str = json.dumps(scores, indent=2, ensure_ascii=False)

    numbered = [f"{idx + 1}. {line}" for idx, line in enumerate(comment_text)]
    formatted_comments = "\n".join(numbered)
    print(formatted_comments)

    final_content = f"""Please evaluate the archer's posture based on the following information:
1. Movement Scores for Each Body Part of Athletes (json):
{scores_str}
2. Evaluation keywords and phrases provided by professional coaches retrieved from the database:
{formatted_comments}
"""
    return final_content
|
| 1511 |
+
|
RAG/Knowledge_Database/languagebind_main/1/1
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
1
|
RAG/Knowledge_Database/languagebind_main/DATASETS.md
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## Sample data
|
| 2 |
+
We are releasing sample data here so that individuals who are interested can further modify the code to train it on their own data, which includes videos, text from various sources, depth, and infrared.
|
| 3 |
+
|
| 4 |
+
<div align="center">
|
| 5 |
+
<table border="1" width="100%">
|
| 6 |
+
<tr align="center">
|
| 7 |
+
<th></th><th>Baidu Yun</th><th>Google Cloud</th><th>Peking University Yun</th>
|
| 8 |
+
</tr>
|
| 9 |
+
<tr align="center">
|
| 10 |
+
<td>DATA</td><td><a href="https://pan.baidu.com/s/1MnQUO6xrMPE5HAwveAdtZA?pwd=5ug9">Link</a></td><td><a href="https://drive.google.com/file/d/1p7y_0H3c84VbWpI-zx_m_mgn84uTZTdO/view?usp=drive_link">Link</a></td><td><a href="https://disk.pku.edu.cn:443/link/B6BDBDDCC616D47126DD0FF568CAF6CD">Link</a></td>
|
| 11 |
+
</tr>
|
| 12 |
+
<tr align="center">
|
| 13 |
+
<td>ANNOTATION</td><td><a href="https://pan.baidu.com/s/1uxxx_67VWy25q7CDilLsHA?pwd=37j3">Link</a></td><td><a href="https://drive.google.com/file/d/1WWVkt9LdbGK0VeQh-g7xy1gUGBwzwVah/view?usp=drive_link">Link</a></td><td><a href="https://disk.pku.edu.cn:443/link/67D836DE504E96457554455A597DC57F">Link</a></td>
|
| 14 |
+
</tr>
|
| 15 |
+
</table>
|
| 16 |
+
</div>
|
| 17 |
+
|
| 18 |
+
## VIDAL-10M
|
| 19 |
+
|
| 20 |
+
### Text and Video
|
| 21 |
+
Due to policy restrictions, we are unable to directly release the videos. However, we provide the YouTube IDs, which can be used to download the videos independently. All textual sources and YouTube IDs can be downloaded from [Google Disk](https://drive.google.com/file/d/1qgm3rO9JugazLJ6KRsAKZfLIagHu3PJ-/view?usp=sharing) or [Baidu Disk](https://pan.baidu.com/s/13gY-IcFSFIuDZ-q0hMTx0g?pwd=gum9).
|
| 22 |
+
|
| 23 |
+
The organization format of `ANNOTATION` is as follows:
|
| 24 |
+
```Bash
|
| 25 |
+
{
|
| 26 |
+
"ImkVYKWqlDU": {
|
| 27 |
+
"folder": "coco_vat_9",
|
| 28 |
+
"mplug": "This video describes a group of scuba divers rolling backwards off a boat while playing an instrument. They are having fun and enjoying their time in the water.",
|
| 29 |
+
"polish_mplug": "scuba divers are seen rolling backwards off a boat while playing an instrument, displaying enjoyment and having a good time in the water.",
|
| 30 |
+
"ofa": [
|
| 31 |
+
" a man in a wet suit and a helmet on a boat",
|
| 32 |
+
" a man in a scuba suit on a boat",
|
| 33 |
+
" a person in a boat holding a diver helmet",
|
| 34 |
+
" a man in a wetsuit on a jet ski",
|
| 35 |
+
" a picture of a body of water with the words boats on it",
|
| 36 |
+
" a person in the water with the words if they rolled",
|
| 37 |
+
" a person in the water with a paddle",
|
| 38 |
+
" a person in the water with a scooter"
|
| 39 |
+
],
|
| 40 |
+
"sound_mplug": "scuba divers rolling backwards off a boat while playing an instrument showcases exuberant laughter, splashing water, and cheery melodies blending with the gentle waves.",
|
| 41 |
+
"raw": "WHY SCUBA DIVERS ROLL BACKWARDS OFF BOAT #shorts"
|
| 42 |
+
},
|
| 43 |
+
"id": {
|
| 44 |
+
"folder": "video_folder",
|
| 45 |
+
"mplug": "mplug_caption",
|
| 46 |
+
"polish_mplug": "polish_mplug_caption",
|
| 47 |
+
"ofa": [
|
| 48 |
+
"ofa_caption_0",
|
| 49 |
+
"ofa_caption_1",
|
| 50 |
+
"ofa_caption_2",
|
| 51 |
+
"ofa_caption_3",
|
| 52 |
+
"ofa_caption_4",
|
| 53 |
+
"ofa_caption_5",
|
| 54 |
+
"ofa_caption_6",
|
| 55 |
+
"ofa_caption_7"
|
| 56 |
+
],
|
| 57 |
+
"sound_mplug": "sound_mplug_caption",
|
| 58 |
+
"raw": "raw_caption#hashtags"
|
| 59 |
+
},
|
| 60 |
+
...
|
| 61 |
+
}
|
| 62 |
+
```
|
| 63 |
+
|
| 64 |
+
### Depth and Thermal (Infrared)
|
| 65 |
+
|
| 66 |
+
We are uploading data to [Hugging Face](https://huggingface.co/datasets/LanguageBind/VIDAL-Depth-Thermal), but based on a conservative estimate, it's approximately **20T**. Please be patient as we work on it.
|
RAG/Knowledge_Database/languagebind_main/DATASET_LICENSE
ADDED
|
@@ -0,0 +1,400 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
Attribution-NonCommercial 4.0 International
|
| 3 |
+
|
| 4 |
+
=======================================================================
|
| 5 |
+
|
| 6 |
+
Creative Commons Corporation ("Creative Commons") is not a law firm and
|
| 7 |
+
does not provide legal services or legal advice. Distribution of
|
| 8 |
+
Creative Commons public licenses does not create a lawyer-client or
|
| 9 |
+
other relationship. Creative Commons makes its licenses and related
|
| 10 |
+
information available on an "as-is" basis. Creative Commons gives no
|
| 11 |
+
warranties regarding its licenses, any material licensed under their
|
| 12 |
+
terms and conditions, or any related information. Creative Commons
|
| 13 |
+
disclaims all liability for damages resulting from their use to the
|
| 14 |
+
fullest extent possible.
|
| 15 |
+
|
| 16 |
+
Using Creative Commons Public Licenses
|
| 17 |
+
|
| 18 |
+
Creative Commons public licenses provide a standard set of terms and
|
| 19 |
+
conditions that creators and other rights holders may use to share
|
| 20 |
+
original works of authorship and other material subject to copyright
|
| 21 |
+
and certain other rights specified in the public license below. The
|
| 22 |
+
following considerations are for informational purposes only, are not
|
| 23 |
+
exhaustive, and do not form part of our licenses.
|
| 24 |
+
|
| 25 |
+
Considerations for licensors: Our public licenses are
|
| 26 |
+
intended for use by those authorized to give the public
|
| 27 |
+
permission to use material in ways otherwise restricted by
|
| 28 |
+
copyright and certain other rights. Our licenses are
|
| 29 |
+
irrevocable. Licensors should read and understand the terms
|
| 30 |
+
and conditions of the license they choose before applying it.
|
| 31 |
+
Licensors should also secure all rights necessary before
|
| 32 |
+
applying our licenses so that the public can reuse the
|
| 33 |
+
material as expected. Licensors should clearly mark any
|
| 34 |
+
material not subject to the license. This includes other CC-
|
| 35 |
+
licensed material, or material used under an exception or
|
| 36 |
+
limitation to copyright. More considerations for licensors:
|
| 37 |
+
wiki.creativecommons.org/Considerations_for_licensors
|
| 38 |
+
|
| 39 |
+
Considerations for the public: By using one of our public
|
| 40 |
+
licenses, a licensor grants the public permission to use the
|
| 41 |
+
licensed material under specified terms and conditions. If
|
| 42 |
+
the licensor's permission is not necessary for any reason--for
|
| 43 |
+
example, because of any applicable exception or limitation to
|
| 44 |
+
copyright--then that use is not regulated by the license. Our
|
| 45 |
+
licenses grant only permissions under copyright and certain
|
| 46 |
+
other rights that a licensor has authority to grant. Use of
|
| 47 |
+
the licensed material may still be restricted for other
|
| 48 |
+
reasons, including because others have copyright or other
|
| 49 |
+
rights in the material. A licensor may make special requests,
|
| 50 |
+
such as asking that all changes be marked or described.
|
| 51 |
+
Although not required by our licenses, you are encouraged to
|
| 52 |
+
respect those requests where reasonable. More_considerations
|
| 53 |
+
for the public:
|
| 54 |
+
wiki.creativecommons.org/Considerations_for_licensees
|
| 55 |
+
|
| 56 |
+
=======================================================================
|
| 57 |
+
|
| 58 |
+
Creative Commons Attribution-NonCommercial 4.0 International Public
|
| 59 |
+
License
|
| 60 |
+
|
| 61 |
+
By exercising the Licensed Rights (defined below), You accept and agree
|
| 62 |
+
to be bound by the terms and conditions of this Creative Commons
|
| 63 |
+
Attribution-NonCommercial 4.0 International Public License ("Public
|
| 64 |
+
License"). To the extent this Public License may be interpreted as a
|
| 65 |
+
contract, You are granted the Licensed Rights in consideration of Your
|
| 66 |
+
acceptance of these terms and conditions, and the Licensor grants You
|
| 67 |
+
such rights in consideration of benefits the Licensor receives from
|
| 68 |
+
making the Licensed Material available under these terms and
|
| 69 |
+
conditions.
|
| 70 |
+
|
| 71 |
+
Section 1 -- Definitions.
|
| 72 |
+
|
| 73 |
+
a. Adapted Material means material subject to Copyright and Similar
|
| 74 |
+
Rights that is derived from or based upon the Licensed Material
|
| 75 |
+
and in which the Licensed Material is translated, altered,
|
| 76 |
+
arranged, transformed, or otherwise modified in a manner requiring
|
| 77 |
+
permission under the Copyright and Similar Rights held by the
|
| 78 |
+
Licensor. For purposes of this Public License, where the Licensed
|
| 79 |
+
Material is a musical work, performance, or sound recording,
|
| 80 |
+
Adapted Material is always produced where the Licensed Material is
|
| 81 |
+
synched in timed relation with a moving image.
|
| 82 |
+
|
| 83 |
+
b. Adapter's License means the license You apply to Your Copyright
|
| 84 |
+
and Similar Rights in Your contributions to Adapted Material in
|
| 85 |
+
accordance with the terms and conditions of this Public License.
|
| 86 |
+
|
| 87 |
+
c. Copyright and Similar Rights means copyright and/or similar rights
|
| 88 |
+
closely related to copyright including, without limitation,
|
| 89 |
+
performance, broadcast, sound recording, and Sui Generis Database
|
| 90 |
+
Rights, without regard to how the rights are labeled or
|
| 91 |
+
categorized. For purposes of this Public License, the rights
|
| 92 |
+
specified in Section 2(b)(1)-(2) are not Copyright and Similar
|
| 93 |
+
Rights.
|
| 94 |
+
d. Effective Technological Measures means those measures that, in the
|
| 95 |
+
absence of proper authority, may not be circumvented under laws
|
| 96 |
+
fulfilling obligations under Article 11 of the WIPO Copyright
|
| 97 |
+
Treaty adopted on December 20, 1996, and/or similar international
|
| 98 |
+
agreements.
|
| 99 |
+
|
| 100 |
+
e. Exceptions and Limitations means fair use, fair dealing, and/or
|
| 101 |
+
any other exception or limitation to Copyright and Similar Rights
|
| 102 |
+
that applies to Your use of the Licensed Material.
|
| 103 |
+
|
| 104 |
+
f. Licensed Material means the artistic or literary work, database,
|
| 105 |
+
or other material to which the Licensor applied this Public
|
| 106 |
+
License.
|
| 107 |
+
|
| 108 |
+
g. Licensed Rights means the rights granted to You subject to the
|
| 109 |
+
terms and conditions of this Public License, which are limited to
|
| 110 |
+
all Copyright and Similar Rights that apply to Your use of the
|
| 111 |
+
Licensed Material and that the Licensor has authority to license.
|
| 112 |
+
|
| 113 |
+
h. Licensor means the individual(s) or entity(ies) granting rights
|
| 114 |
+
under this Public License.
|
| 115 |
+
|
| 116 |
+
i. NonCommercial means not primarily intended for or directed towards
|
| 117 |
+
commercial advantage or monetary compensation. For purposes of
|
| 118 |
+
this Public License, the exchange of the Licensed Material for
|
| 119 |
+
other material subject to Copyright and Similar Rights by digital
|
| 120 |
+
file-sharing or similar means is NonCommercial provided there is
|
| 121 |
+
no payment of monetary compensation in connection with the
|
| 122 |
+
exchange.
|
| 123 |
+
|
| 124 |
+
j. Share means to provide material to the public by any means or
|
| 125 |
+
process that requires permission under the Licensed Rights, such
|
| 126 |
+
as reproduction, public display, public performance, distribution,
|
| 127 |
+
dissemination, communication, or importation, and to make material
|
| 128 |
+
available to the public including in ways that members of the
|
| 129 |
+
public may access the material from a place and at a time
|
| 130 |
+
individually chosen by them.
|
| 131 |
+
|
| 132 |
+
k. Sui Generis Database Rights means rights other than copyright
|
| 133 |
+
resulting from Directive 96/9/EC of the European Parliament and of
|
| 134 |
+
the Council of 11 March 1996 on the legal protection of databases,
|
| 135 |
+
as amended and/or succeeded, as well as other essentially
|
| 136 |
+
equivalent rights anywhere in the world.
|
| 137 |
+
|
| 138 |
+
l. You means the individual or entity exercising the Licensed Rights
|
| 139 |
+
under this Public License. Your has a corresponding meaning.
|
| 140 |
+
|
| 141 |
+
Section 2 -- Scope.
|
| 142 |
+
|
| 143 |
+
a. License grant.
|
| 144 |
+
|
| 145 |
+
1. Subject to the terms and conditions of this Public License,
|
| 146 |
+
the Licensor hereby grants You a worldwide, royalty-free,
|
| 147 |
+
non-sublicensable, non-exclusive, irrevocable license to
|
| 148 |
+
exercise the Licensed Rights in the Licensed Material to:
|
| 149 |
+
|
| 150 |
+
a. reproduce and Share the Licensed Material, in whole or
|
| 151 |
+
in part, for NonCommercial purposes only; and
|
| 152 |
+
|
| 153 |
+
b. produce, reproduce, and Share Adapted Material for
|
| 154 |
+
NonCommercial purposes only.
|
| 155 |
+
|
| 156 |
+
2. Exceptions and Limitations. For the avoidance of doubt, where
|
| 157 |
+
Exceptions and Limitations apply to Your use, this Public
|
| 158 |
+
License does not apply, and You do not need to comply with
|
| 159 |
+
its terms and conditions.
|
| 160 |
+
|
| 161 |
+
3. Term. The term of this Public License is specified in Section
|
| 162 |
+
6(a).
|
| 163 |
+
|
| 164 |
+
4. Media and formats; technical modifications allowed. The
|
| 165 |
+
Licensor authorizes You to exercise the Licensed Rights in
|
| 166 |
+
all media and formats whether now known or hereafter created,
|
| 167 |
+
and to make technical modifications necessary to do so. The
|
| 168 |
+
Licensor waives and/or agrees not to assert any right or
|
| 169 |
+
authority to forbid You from making technical modifications
|
| 170 |
+
necessary to exercise the Licensed Rights, including
|
| 171 |
+
technical modifications necessary to circumvent Effective
|
| 172 |
+
Technological Measures. For purposes of this Public License,
|
| 173 |
+
simply making modifications authorized by this Section 2(a)
|
| 174 |
+
(4) never produces Adapted Material.
|
| 175 |
+
|
| 176 |
+
5. Downstream recipients.
|
| 177 |
+
|
| 178 |
+
a. Offer from the Licensor -- Licensed Material. Every
|
| 179 |
+
recipient of the Licensed Material automatically
|
| 180 |
+
receives an offer from the Licensor to exercise the
|
| 181 |
+
Licensed Rights under the terms and conditions of this
|
| 182 |
+
Public License.
|
| 183 |
+
|
| 184 |
+
b. No downstream restrictions. You may not offer or impose
|
| 185 |
+
any additional or different terms or conditions on, or
|
| 186 |
+
apply any Effective Technological Measures to, the
|
| 187 |
+
Licensed Material if doing so restricts exercise of the
|
| 188 |
+
Licensed Rights by any recipient of the Licensed
|
| 189 |
+
Material.
|
| 190 |
+
|
| 191 |
+
6. No endorsement. Nothing in this Public License constitutes or
|
| 192 |
+
may be construed as permission to assert or imply that You
|
| 193 |
+
are, or that Your use of the Licensed Material is, connected
|
| 194 |
+
with, or sponsored, endorsed, or granted official status by,
|
| 195 |
+
the Licensor or others designated to receive attribution as
|
| 196 |
+
provided in Section 3(a)(1)(A)(i).
|
| 197 |
+
|
| 198 |
+
b. Other rights.
|
| 199 |
+
|
| 200 |
+
1. Moral rights, such as the right of integrity, are not
|
| 201 |
+
licensed under this Public License, nor are publicity,
|
| 202 |
+
privacy, and/or other similar personality rights; however, to
|
| 203 |
+
the extent possible, the Licensor waives and/or agrees not to
|
| 204 |
+
assert any such rights held by the Licensor to the limited
|
| 205 |
+
extent necessary to allow You to exercise the Licensed
|
| 206 |
+
Rights, but not otherwise.
|
| 207 |
+
|
| 208 |
+
2. Patent and trademark rights are not licensed under this
|
| 209 |
+
Public License.
|
| 210 |
+
|
| 211 |
+
3. To the extent possible, the Licensor waives any right to
|
| 212 |
+
collect royalties from You for the exercise of the Licensed
|
| 213 |
+
Rights, whether directly or through a collecting society
|
| 214 |
+
under any voluntary or waivable statutory or compulsory
|
| 215 |
+
licensing scheme. In all other cases the Licensor expressly
|
| 216 |
+
reserves any right to collect such royalties, including when
|
| 217 |
+
the Licensed Material is used other than for NonCommercial
|
| 218 |
+
purposes.
|
| 219 |
+
|
| 220 |
+
Section 3 -- License Conditions.
|
| 221 |
+
|
| 222 |
+
Your exercise of the Licensed Rights is expressly made subject to the
|
| 223 |
+
following conditions.
|
| 224 |
+
|
| 225 |
+
a. Attribution.
|
| 226 |
+
|
| 227 |
+
1. If You Share the Licensed Material (including in modified
|
| 228 |
+
form), You must:
|
| 229 |
+
|
| 230 |
+
a. retain the following if it is supplied by the Licensor
|
| 231 |
+
with the Licensed Material:
|
| 232 |
+
|
| 233 |
+
i. identification of the creator(s) of the Licensed
|
| 234 |
+
Material and any others designated to receive
|
| 235 |
+
attribution, in any reasonable manner requested by
|
| 236 |
+
the Licensor (including by pseudonym if
|
| 237 |
+
designated);
|
| 238 |
+
|
| 239 |
+
ii. a copyright notice;
|
| 240 |
+
|
| 241 |
+
iii. a notice that refers to this Public License;
|
| 242 |
+
|
| 243 |
+
iv. a notice that refers to the disclaimer of
|
| 244 |
+
warranties;
|
| 245 |
+
|
| 246 |
+
v. a URI or hyperlink to the Licensed Material to the
|
| 247 |
+
extent reasonably practicable;
|
| 248 |
+
|
| 249 |
+
b. indicate if You modified the Licensed Material and
|
| 250 |
+
retain an indication of any previous modifications; and
|
| 251 |
+
|
| 252 |
+
c. indicate the Licensed Material is licensed under this
|
| 253 |
+
Public License, and include the text of, or the URI or
|
| 254 |
+
hyperlink to, this Public License.
|
| 255 |
+
|
| 256 |
+
2. You may satisfy the conditions in Section 3(a)(1) in any
|
| 257 |
+
reasonable manner based on the medium, means, and context in
|
| 258 |
+
which You Share the Licensed Material. For example, it may be
|
| 259 |
+
reasonable to satisfy the conditions by providing a URI or
|
| 260 |
+
hyperlink to a resource that includes the required
|
| 261 |
+
information.
|
| 262 |
+
|
| 263 |
+
3. If requested by the Licensor, You must remove any of the
|
| 264 |
+
information required by Section 3(a)(1)(A) to the extent
|
| 265 |
+
reasonably practicable.
|
| 266 |
+
|
| 267 |
+
4. If You Share Adapted Material You produce, the Adapter's
|
| 268 |
+
License You apply must not prevent recipients of the Adapted
|
| 269 |
+
Material from complying with this Public License.
|
| 270 |
+
|
| 271 |
+
Section 4 -- Sui Generis Database Rights.
|
| 272 |
+
|
| 273 |
+
Where the Licensed Rights include Sui Generis Database Rights that
|
| 274 |
+
apply to Your use of the Licensed Material:
|
| 275 |
+
|
| 276 |
+
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
|
| 277 |
+
to extract, reuse, reproduce, and Share all or a substantial
|
| 278 |
+
portion of the contents of the database for NonCommercial purposes
|
| 279 |
+
only;
|
| 280 |
+
|
| 281 |
+
b. if You include all or a substantial portion of the database
|
| 282 |
+
contents in a database in which You have Sui Generis Database
|
| 283 |
+
Rights, then the database in which You have Sui Generis Database
|
| 284 |
+
Rights (but not its individual contents) is Adapted Material; and
|
| 285 |
+
|
| 286 |
+
c. You must comply with the conditions in Section 3(a) if You Share
|
| 287 |
+
all or a substantial portion of the contents of the database.
|
| 288 |
+
|
| 289 |
+
For the avoidance of doubt, this Section 4 supplements and does not
|
| 290 |
+
replace Your obligations under this Public License where the Licensed
|
| 291 |
+
Rights include other Copyright and Similar Rights.
|
| 292 |
+
|
| 293 |
+
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
|
| 294 |
+
|
| 295 |
+
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
|
| 296 |
+
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
|
| 297 |
+
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
|
| 298 |
+
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
|
| 299 |
+
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
|
| 300 |
+
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
|
| 301 |
+
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
|
| 302 |
+
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
|
| 303 |
+
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
|
| 304 |
+
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
|
| 305 |
+
|
| 306 |
+
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
|
| 307 |
+
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
|
| 308 |
+
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
|
| 309 |
+
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
|
| 310 |
+
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
|
| 311 |
+
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
|
| 312 |
+
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
|
| 313 |
+
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
|
| 314 |
+
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
|
| 315 |
+
|
| 316 |
+
c. The disclaimer of warranties and limitation of liability provided
|
| 317 |
+
above shall be interpreted in a manner that, to the extent
|
| 318 |
+
possible, most closely approximates an absolute disclaimer and
|
| 319 |
+
waiver of all liability.
|
| 320 |
+
|
| 321 |
+
Section 6 -- Term and Termination.
|
| 322 |
+
|
| 323 |
+
a. This Public License applies for the term of the Copyright and
|
| 324 |
+
Similar Rights licensed here. However, if You fail to comply with
|
| 325 |
+
this Public License, then Your rights under this Public License
|
| 326 |
+
terminate automatically.
|
| 327 |
+
|
| 328 |
+
b. Where Your right to use the Licensed Material has terminated under
|
| 329 |
+
Section 6(a), it reinstates:
|
| 330 |
+
|
| 331 |
+
1. automatically as of the date the violation is cured, provided
|
| 332 |
+
it is cured within 30 days of Your discovery of the
|
| 333 |
+
violation; or
|
| 334 |
+
|
| 335 |
+
2. upon express reinstatement by the Licensor.
|
| 336 |
+
|
| 337 |
+
For the avoidance of doubt, this Section 6(b) does not affect any
|
| 338 |
+
right the Licensor may have to seek remedies for Your violations
|
| 339 |
+
of this Public License.
|
| 340 |
+
|
| 341 |
+
c. For the avoidance of doubt, the Licensor may also offer the
|
| 342 |
+
Licensed Material under separate terms or conditions or stop
|
| 343 |
+
distributing the Licensed Material at any time; however, doing so
|
| 344 |
+
will not terminate this Public License.
|
| 345 |
+
|
| 346 |
+
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
|
| 347 |
+
License.
|
| 348 |
+
|
| 349 |
+
Section 7 -- Other Terms and Conditions.
|
| 350 |
+
|
| 351 |
+
a. The Licensor shall not be bound by any additional or different
|
| 352 |
+
terms or conditions communicated by You unless expressly agreed.
|
| 353 |
+
|
| 354 |
+
b. Any arrangements, understandings, or agreements regarding the
|
| 355 |
+
Licensed Material not stated herein are separate from and
|
| 356 |
+
independent of the terms and conditions of this Public License.
|
| 357 |
+
|
| 358 |
+
Section 8 -- Interpretation.
|
| 359 |
+
|
| 360 |
+
a. For the avoidance of doubt, this Public License does not, and
|
| 361 |
+
shall not be interpreted to, reduce, limit, restrict, or impose
|
| 362 |
+
conditions on any use of the Licensed Material that could lawfully
|
| 363 |
+
be made without permission under this Public License.
|
| 364 |
+
|
| 365 |
+
b. To the extent possible, if any provision of this Public License is
|
| 366 |
+
deemed unenforceable, it shall be automatically reformed to the
|
| 367 |
+
minimum extent necessary to make it enforceable. If the provision
|
| 368 |
+
cannot be reformed, it shall be severed from this Public License
|
| 369 |
+
without affecting the enforceability of the remaining terms and
|
| 370 |
+
conditions.
|
| 371 |
+
|
| 372 |
+
c. No term or condition of this Public License will be waived and no
|
| 373 |
+
failure to comply consented to unless expressly agreed to by the
|
| 374 |
+
Licensor.
|
| 375 |
+
|
| 376 |
+
d. Nothing in this Public License constitutes or may be interpreted
|
| 377 |
+
as a limitation upon, or waiver of, any privileges and immunities
|
| 378 |
+
that apply to the Licensor or You, including from the legal
|
| 379 |
+
processes of any jurisdiction or authority.
|
| 380 |
+
|
| 381 |
+
=======================================================================
|
| 382 |
+
|
| 383 |
+
Creative Commons is not a party to its public
|
| 384 |
+
licenses. Notwithstanding, Creative Commons may elect to apply one of
|
| 385 |
+
its public licenses to material it publishes and in those instances
|
| 386 |
+
will be considered the “Licensor.” The text of the Creative Commons
|
| 387 |
+
public licenses is dedicated to the public domain under the CC0 Public
|
| 388 |
+
Domain Dedication. Except for the limited purpose of indicating that
|
| 389 |
+
material is shared under a Creative Commons public license or as
|
| 390 |
+
otherwise permitted by the Creative Commons policies published at
|
| 391 |
+
creativecommons.org/policies, Creative Commons does not authorize the
|
| 392 |
+
use of the trademark "Creative Commons" or any other trademark or logo
|
| 393 |
+
of Creative Commons without its prior written consent including,
|
| 394 |
+
without limitation, in connection with any unauthorized modifications
|
| 395 |
+
to any of its public licenses or any other arrangements,
|
| 396 |
+
understandings, or agreements concerning use of licensed material. For
|
| 397 |
+
the avoidance of doubt, this paragraph does not form part of the
|
| 398 |
+
public licenses.
|
| 399 |
+
|
| 400 |
+
Creative Commons may be contacted at creativecommons.org.
|
RAG/Knowledge_Database/languagebind_main/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2023 PKU-YUAN's Group (袁粒课题组-北大信工)
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
RAG/Knowledge_Database/languagebind_main/README.md
ADDED
|
@@ -0,0 +1,422 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<p align="center">
|
| 2 |
+
<img src="assets/logo.jpg" width="350" style="margin-bottom: 0.2;"/><img src="assets/sota.jpg" width="450" style="margin-bottom: 0.2;"/>
|
| 3 |
+
<p>
|
| 4 |
+
<h2 align="center"> <a href="https://arxiv.org/pdf/2310.01852.pdf">【ICLR 2024 🔥】LanguageBind: Extending Video-Language Pretraining to N-modality by Language-based Semantic Alignment</a></h2>
|
| 5 |
+
<h5 align="center"> If you like our project, please give us a star ⭐ on GitHub for latest update. </h5>
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
<h5 align="center">
|
| 9 |
+
|
| 10 |
+
[](https://huggingface.co/spaces/LanguageBind/LanguageBind)
|
| 11 |
+
[](https://huggingface.co/datasets/LanguageBind/VIDAL-Depth-Thermal)
|
| 12 |
+
[](https://arxiv.org/abs/2310.01852)
|
| 13 |
+
[](https://mp.weixin.qq.com/s/EFqLv_Euf5VU024zOtzkkg)
|
| 14 |
+
[](https://mp.weixin.qq.com/s/E5Tazm_vz1CADMwV0tdhnw)
|
| 15 |
+
[](https://zhuanlan.zhihu.com/p/660567767)
|
| 16 |
+
[](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/LICENSE)
|
| 17 |
+
[](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/DATASET_LICENSE)
|
| 18 |
+
[](https://hits.seeyoufarm.com)
|
| 19 |
+
[](https://github.com/PKU-YuanGroup/LanguageBind/issues?q=is%3Aopen+is%3Aissue)
|
| 20 |
+
[](https://github.com/PKU-YuanGroup/LanguageBind/issues?q=is%3Aissue+is%3Aclosed) <br>
|
| 21 |
+
|
| 22 |
+
</h5>
|
| 23 |
+
|
| 24 |
+
[](https://paperswithcode.com/sota/zero-shot-audio-classification-on-audioset?p=languagebind-extending-video-language) <br>
|
| 25 |
+
[](https://paperswithcode.com/sota/zero-shot-audio-classification-on-vgg-sound?p=languagebind-extending-video-language) <br>
|
| 26 |
+
[](https://paperswithcode.com/sota/zero-shot-text-to-audio-retrieval-on-clotho?p=languagebind-extending-video-language) <br>
|
| 27 |
+
[](https://paperswithcode.com/sota/zero-shot-scene-classification-unified?p=languagebind-extending-video-language) <br>
|
| 28 |
+
[](https://paperswithcode.com/sota/zero-shot-classification-unified-classes-on?p=languagebind-extending-video-language) <br>
|
| 29 |
+
[](https://paperswithcode.com/sota/zero-shot-video-retrieval-on-msvd?p=languagebind-extending-video-language) <br>
|
| 30 |
+
[](https://paperswithcode.com/sota/zero-shot-environment-sound-classification-on-1?p=languagebind-extending-video-language) <br>
|
| 31 |
+
[](https://paperswithcode.com/sota/zero-shot-text-to-audio-retrieval-on?p=languagebind-extending-video-language) <br>
|
| 32 |
+
[](https://paperswithcode.com/sota/zero-shot-video-retrieval-on-activitynet?p=languagebind-extending-video-language) <br>
|
| 33 |
+
[](https://paperswithcode.com/sota/zero-shot-video-retrieval-on-msr-vtt?p=languagebind-extending-video-language) <br>
|
| 34 |
+
[](https://paperswithcode.com/sota/zero-shot-video-retrieval-on-didemo?p=languagebind-extending-video-language) <br>
|
| 35 |
+
[](https://paperswithcode.com/sota/zero-shot-action-recognition-on-kinetics?p=languagebind-extending-video-language)
|
| 36 |
+
|
| 37 |
+
<details open><summary>💡 I also have other vision-language projects that may interest you ✨. </summary><p>
|
| 38 |
+
<!-- may -->
|
| 39 |
+
|
| 40 |
+
> [**Video-LLaVA: Learning United Visual Representation by Alignment Before Projection**](https://arxiv.org/abs/2311.10122) <br>
|
| 41 |
+
> Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, Li Yuan <br>
|
| 42 |
+
[](https://github.com/PKU-YuanGroup/Video-LLaVA) [](https://github.com/PKU-YuanGroup/Video-LLaVA) [](https://arxiv.org/abs/2311.10122) <br>
|
| 43 |
+
|
| 44 |
+
> [**MoE-LLaVA: Mixture of Experts for Large Vision-Language Models**](https://github.com/PKU-YuanGroup/MoE-LLaVA/blob/main/MoE-LLaVA.pdf) <br>
|
| 45 |
+
> Bin Lin, Zhenyu Tang, Yang Ye, Jiaxi Cui, Bin Zhu, Peng Jin, Junwu Zhang, Munan Ning, Li Yuan <br>
|
| 46 |
+
[](https://github.com/PKU-YuanGroup/MoE-LLaVA) [](https://github.com/PKU-YuanGroup/MoE-LLaVA) [](https://arxiv.org/abs/2401.15947) <br>
|
| 47 |
+
|
| 48 |
+
> [**Video-Bench: A Comprehensive Benchmark and Toolkit for Evaluating Video-based Large Language Models**](https://arxiv.org/abs/2311.08046) <br>
|
| 49 |
+
> Munan Ning, Bin Zhu, Yujia Xie, Bin Lin, Jiaxi Cui, Lu Yuan, Dongdong Chen, Li Yuan <br>
|
| 50 |
+
[](https://github.com/PKU-YuanGroup/Video-Bench) [](https://github.com/PKU-YuanGroup/Video-Bench) [](https://arxiv.org/abs/2311.16103) <br>
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
</p></details>
|
| 54 |
+
|
| 55 |
+
## 📰 News
|
| 56 |
+
* **[2024.01.27]** 👀👀👀 Our [MoE-LLaVA](https://github.com/PKU-YuanGroup/MoE-LLaVA) is released! A sparse model with 3B parameters outperformed the dense model with 7B parameters.
|
| 57 |
+
* **[2024.01.16]** 🔥🔥🔥 Our LanguageBind has been accepted at ICLR 2024! We earn the score of 6(3)8(6)6(6)6(6) [here](https://openreview.net/forum?id=QmZKc7UZCy&noteId=OgsxQxAleA).
|
| 58 |
+
* **[2023.12.15]** 💪💪💪 We expand the 💥💥💥 VIDAL dataset and now have **10M video-text data**. We launch **LanguageBind_Video 1.5**, checking our [model zoo](#-model-zoo).
|
| 59 |
+
* **[2023.12.10]** We expand the 💥💥💥 VIDAL dataset and now have **10M depth and 10M thermal data**. We are in the process of uploading thermal and depth data on [Hugging Face](https://huggingface.co/datasets/LanguageBind/VIDAL-Depth-Thermal) and expect the whole process to last 1-2 months.
|
| 60 |
+
* **[2023.11.27]** 🔥🔥🔥 We have updated our [paper](https://arxiv.org/abs/2310.01852) with emergency zero-shot results, checking our ✨ [results](#emergency-results).
|
| 61 |
+
* **[2023.11.26]** 💥💥💥 We have open-sourced all textual sources and corresponding YouTube IDs [here](DATASETS.md).
|
| 62 |
+
* **[2023.11.26]** 📣📣📣 We have open-sourced fully fine-tuned **Video & Audio**, achieving improved performance once again, checking our [model zoo](#-model-zoo).
|
| 63 |
+
* **[2023.11.22]** We are about to release a fully fine-tuned version, and the **HUGE** version is currently undergoing training.
|
| 64 |
+
* **[2023.11.21]** 💥 We are releasing sample data in [DATASETS.md](DATASETS.md) so that individuals who are interested can further modify the code to train it on their own data.
|
| 65 |
+
* **[2023.11.20]** 🚀🚀🚀 [Video-LLaVA](https://github.com/PKU-YuanGroup/Video-LLaVA) builds a large visual-language model to achieve 🎉SOTA performances based on LanguageBind encoders.
|
| 66 |
+
* **[2023.10.23]** 🎶 LanguageBind-Audio achieves 🎉🎉🎉**state-of-the-art (SOTA) performance on 5 datasets**, checking our ✨ [results](#multiple-modalities)!
|
| 67 |
+
* **[2023.10.14]** 😱 Released a stronger LanguageBind-Video, checking our ✨ [results](#video-language)! The video checkpoint **has been updated** on Huggingface Model Hub!
|
| 68 |
+
* **[2023.10.10]** We provide sample data, which can be found in [assets](assets), and [emergency zero-shot usage](#emergency-zero-shot) is described.
|
| 69 |
+
* **[2023.10.07]** The checkpoints are available on 🤗 [Huggingface Model](https://huggingface.co/LanguageBind).
|
| 70 |
+
* **[2023.10.04]** Code and [demo](https://huggingface.co/spaces/LanguageBind/LanguageBind) are available now! Welcome to **watch** 👀 this repository for the latest updates.
|
| 71 |
+
|
| 72 |
+
## 😮 Highlights
|
| 73 |
+
|
| 74 |
+
### 💡 High performance, but NO intermediate modality required
|
| 75 |
+
LanguageBind is a **language-centric** multimodal pretraining approach, **taking the language as the bind across different modalities** because the language modality is well-explored and contains rich semantics.
|
| 76 |
+
* The following first figure shows the architecture of LanguageBind. LanguageBind can be easily extended to segmentation, detection tasks, and potentially to unlimited modalities.
|
| 77 |
+
|
| 78 |
+
### ⚡️ A multimodal, fully aligned and voluminous dataset
|
| 79 |
+
We propose **VIDAL-10M**, **10 Million data** with **V**ideo, **I**nfrared, **D**epth, **A**udio and their corresponding **L**anguage, which greatly expands the data beyond visual modalities.
|
| 80 |
+
* The second figure shows our proposed VIDAL-10M dataset, which includes five modalities: video, infrared, depth, audio, and language.
|
| 81 |
+
|
| 82 |
+
### 🔥 Multi-view enhanced description for training
|
| 83 |
+
We make multi-view enhancements to language. We produce multi-view description that combines **meta-data**, **spatial**, and **temporal** to greatly enhance the semantic information of the language. In addition we further **enhance the language with ChatGPT** to create a good semantic space for each modality aligned language.
|
| 84 |
+
|
| 85 |
+
<p align="center">
|
| 86 |
+
<img src="assets/languagebind.jpg" width=100%>
|
| 87 |
+
</p>
|
| 88 |
+
<p align="center">
|
| 89 |
+
<img src="assets/iclr_dataset_sample.jpg" width=99%>
|
| 90 |
+
</p>
|
| 91 |
+
|
| 92 |
+
## 🤗 Demo
|
| 93 |
+
|
| 94 |
+
* **Local demo.** Highly recommend trying out our web demo, which incorporates all features currently supported by LanguageBind.
|
| 95 |
+
```bash
|
| 96 |
+
python gradio_app.py
|
| 97 |
+
```
|
| 98 |
+
|
| 99 |
+
* **Online demo.** We provide the [online demo](https://huggingface.co/spaces/LanguageBind/LanguageBind) in Huggingface Spaces. In this demo, you can calculate the similarity of modalities to language, such as audio-to-language, video-to-language, and depth-to-image.
|
| 100 |
+
<p align="center">
|
| 101 |
+
<img src="assets/demo.png" width=100%>
|
| 102 |
+
</p>
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
## 🚀 Main Results
|
| 107 |
+
|
| 108 |
+
### Video-Language
|
| 109 |
+
LanguageBind achieves **state-of-the-art (SOTA) performance on four datasets**, * denotes the results of full tuning.
|
| 110 |
+
<p align="left">
|
| 111 |
+
<img src="assets/result1.jpg" width=80%>
|
| 112 |
+
</p>
|
| 113 |
+
|
| 114 |
+
### Multiple Modalities
|
| 115 |
+
Video-Language, Infrared-Language, Depth-Language, and Audio-Language zero-shot classification, * denotes the results of full tuning.
|
| 116 |
+
<p align="left">
|
| 117 |
+
<img src="assets/res1.jpg" width=80%>
|
| 118 |
+
</p>
|
| 119 |
+
We report text-to-audio results for retrieval, * denotes the results of full tuning.
|
| 120 |
+
<p align="left">
|
| 121 |
+
<img src="assets/res2.jpg" width=35%>
|
| 122 |
+
</p>
|
| 123 |
+
|
| 124 |
+
### Emergency results
|
| 125 |
+
<p align="left">
|
| 126 |
+
<img src="assets/emergency.jpg" width=60%>
|
| 127 |
+
</p>
|
| 128 |
+
|
| 129 |
+
## 🛠️ Requirements and Installation
|
| 130 |
+
* Python >= 3.8
|
| 131 |
+
* Pytorch >= 1.13.1
|
| 132 |
+
* CUDA Version >= 11.6
|
| 133 |
+
* Install required packages:
|
| 134 |
+
```bash
|
| 135 |
+
git clone https://github.com/PKU-YuanGroup/LanguageBind
|
| 136 |
+
cd LanguageBind
|
| 137 |
+
pip install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116
|
| 138 |
+
pip install -r requirements.txt
|
| 139 |
+
```
|
| 140 |
+
|
| 141 |
+
## 🐳 Model Zoo
|
| 142 |
+
|
| 143 |
+
The names in the table represent different encoder models. For example, `LanguageBind/LanguageBind_Video_FT` represents the fully fine-tuned version, while `LanguageBind/LanguageBind_Video` represents the LoRA-tuned version.
|
| 144 |
+
|
| 145 |
+
You can freely replace them in the recommended [API usage](#-api). We recommend using the fully fine-tuned version, as it offers stronger performance.
|
| 146 |
+
|
| 147 |
+
<div align="center">
|
| 148 |
+
<table border="1" width="100%">
|
| 149 |
+
<tr align="center">
|
| 150 |
+
<th>Modality</th><th>LoRA tuning</th><th>Fine-tuning</th>
|
| 151 |
+
</tr>
|
| 152 |
+
<tr align="center">
|
| 153 |
+
<td>Video</td><td><a href="https://huggingface.co/LanguageBind/LanguageBind_Video">LanguageBind_Video</a></td><td><a href="https://huggingface.co/LanguageBind/LanguageBind_Video_FT">LanguageBind_Video_FT</a></td>
|
| 154 |
+
</tr>
|
| 155 |
+
<tr align="center">
|
| 156 |
+
<td>Audio</td><td><a href="https://huggingface.co/LanguageBind/LanguageBind_Audio">LanguageBind_Audio</a></td><td><a href="https://huggingface.co/LanguageBind/LanguageBind_Audio_FT">LanguageBind_Audio_FT</a></td>
|
| 157 |
+
</tr>
|
| 158 |
+
<tr align="center">
|
| 159 |
+
<td>Depth</td><td><a href="https://huggingface.co/LanguageBind/LanguageBind_Depth">LanguageBind_Depth</a></td><td>-</td>
|
| 160 |
+
</tr>
|
| 161 |
+
<tr align="center">
|
| 162 |
+
<td>Thermal</td><td><a href="https://huggingface.co/LanguageBind/LanguageBind_Thermal">LanguageBind_Thermal</a></td><td>-</td>
|
| 163 |
+
</tr>
|
| 164 |
+
</table>
|
| 165 |
+
</div>
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
<div align="center">
|
| 169 |
+
<table border="1" width="100%">
|
| 170 |
+
<tr align="center">
|
| 171 |
+
<th>Version</th><th>Tuning</th><th>Model size</th><th>Num_frames</th><th>HF Link</th><th>MSR-VTT</th><th>DiDeMo</th><th>ActivityNet</th><th>MSVD</th>
|
| 172 |
+
</tr>
|
| 173 |
+
<tr align="center">
|
| 174 |
+
<td>LanguageBind_Video</td><td>LoRA</td><td>Large</td><td>8</td><td><a href="https://huggingface.co/LanguageBind/LanguageBind_Video">Link</a></td><td>42.6</td><td>37.8</td><td>35.1</td><td>52.2</td>
|
| 175 |
+
</tr>
|
| 176 |
+
<tr align="center">
|
| 177 |
+
<td>LanguageBind_Video_FT</td><td>Full-tuning</td><td>Large</td><td>8</td><td><a href="https://huggingface.co/LanguageBind/LanguageBind_Video_FT">Link</a></td><td>42.7</td><td>38.1</td><td>36.9</td><td>53.5</td>
|
| 178 |
+
</tr>
|
| 179 |
+
<tr align="center">
|
| 180 |
+
<td>LanguageBind_Video_V1.5_FT</td><td>Full-tuning</td><td>Large</td><td>8</td><td><a href="https://huggingface.co/LanguageBind/LanguageBind_Video_V1.5_FT">Link</a></td><td>42.8</td><td>39.7</td><td>38.4</td><td>54.1</td>
|
| 181 |
+
</tr>
|
| 182 |
+
<tr align="center">
|
| 183 |
+
<td>LanguageBind_Video_V1.5_FT</td><td>Full-tuning</td><td>Large</td><td>12</td><td>Coming soon</td>
|
| 184 |
+
</tr>
|
| 185 |
+
<tr align="center">
|
| 186 |
+
<td>LanguageBind_Video_Huge_V1.5_FT</td><td>Full-tuning</td><td>Huge</td><td>8</td><td><a href="https://huggingface.co/LanguageBind/LanguageBind_Video_Huge_V1.5_FT">Link</a></td><td>44.8</td><td>39.9</td><td>41.0</td><td>53.7</td>
|
| 187 |
+
</tr>
|
| 188 |
+
<tr align="center">
|
| 189 |
+
<td>LanguageBind_Video_Huge_V1.5_FT</td><td>Full-tuning</td><td>Huge</td><td>12</td><td>Coming soon</td>
|
| 190 |
+
</tr>
|
| 191 |
+
</table>
|
| 192 |
+
</div>
|
| 193 |
+
|
| 194 |
+
## 🤖 API
|
| 195 |
+
**We open source all modalities preprocessing code.** If you want to load the model (e.g. ```LanguageBind/LanguageBind_Thermal```) from the model hub on Huggingface or on local, you can use the following code snippets!
|
| 196 |
+
### Inference for Multi-modal Binding
|
| 197 |
+
We have provided some sample datasets in [assets](assets) to quickly see how LanguageBind works.
|
| 198 |
+
```python
|
| 199 |
+
import torch
|
| 200 |
+
from languagebind import LanguageBind, to_device, transform_dict, LanguageBindImageTokenizer
|
| 201 |
+
|
| 202 |
+
if __name__ == '__main__':
|
| 203 |
+
device = 'cuda:0'
|
| 204 |
+
device = torch.device(device)
|
| 205 |
+
clip_type = {
|
| 206 |
+
'video': 'LanguageBind_Video_FT', # also LanguageBind_Video
|
| 207 |
+
'audio': 'LanguageBind_Audio_FT', # also LanguageBind_Audio
|
| 208 |
+
'thermal': 'LanguageBind_Thermal',
|
| 209 |
+
'image': 'LanguageBind_Image',
|
| 210 |
+
'depth': 'LanguageBind_Depth',
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
model = LanguageBind(clip_type=clip_type, cache_dir='./cache_dir')
|
| 214 |
+
model = model.to(device)
|
| 215 |
+
model.eval()
|
| 216 |
+
pretrained_ckpt = f'lb203/LanguageBind_Image'
|
| 217 |
+
tokenizer = LanguageBindImageTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir/tokenizer_cache_dir')
|
| 218 |
+
modality_transform = {c: transform_dict[c](model.modality_config[c]) for c in clip_type.keys()}
|
| 219 |
+
|
| 220 |
+
image = ['assets/image/0.jpg', 'assets/image/1.jpg']
|
| 221 |
+
audio = ['assets/audio/0.wav', 'assets/audio/1.wav']
|
| 222 |
+
video = ['assets/video/0.mp4', 'assets/video/1.mp4']
|
| 223 |
+
depth = ['assets/depth/0.png', 'assets/depth/1.png']
|
| 224 |
+
thermal = ['assets/thermal/0.jpg', 'assets/thermal/1.jpg']
|
| 225 |
+
language = ["Training a parakeet to climb up a ladder.", 'A lion climbing a tree to catch a monkey.']
|
| 226 |
+
|
| 227 |
+
inputs = {
|
| 228 |
+
'image': to_device(modality_transform['image'](image), device),
|
| 229 |
+
'video': to_device(modality_transform['video'](video), device),
|
| 230 |
+
'audio': to_device(modality_transform['audio'](audio), device),
|
| 231 |
+
'depth': to_device(modality_transform['depth'](depth), device),
|
| 232 |
+
'thermal': to_device(modality_transform['thermal'](thermal), device),
|
| 233 |
+
}
|
| 234 |
+
inputs['language'] = to_device(tokenizer(language, max_length=77, padding='max_length',
|
| 235 |
+
truncation=True, return_tensors='pt'), device)
|
| 236 |
+
|
| 237 |
+
with torch.no_grad():
|
| 238 |
+
embeddings = model(inputs)
|
| 239 |
+
|
| 240 |
+
print("Video x Text: \n",
|
| 241 |
+
torch.softmax(embeddings['video'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
|
| 242 |
+
print("Image x Text: \n",
|
| 243 |
+
torch.softmax(embeddings['image'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
|
| 244 |
+
print("Depth x Text: \n",
|
| 245 |
+
torch.softmax(embeddings['depth'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
|
| 246 |
+
print("Audio x Text: \n",
|
| 247 |
+
torch.softmax(embeddings['audio'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
|
| 248 |
+
print("Thermal x Text: \n",
|
| 249 |
+
torch.softmax(embeddings['thermal'] @ embeddings['language'].T, dim=-1).detach().cpu().numpy())
|
| 250 |
+
```
|
| 251 |
+
Then returns the following result.
|
| 252 |
+
```bash
|
| 253 |
+
Video x Text:
|
| 254 |
+
[[9.9989331e-01 1.0667283e-04]
|
| 255 |
+
[1.3255903e-03 9.9867439e-01]]
|
| 256 |
+
Image x Text:
|
| 257 |
+
[[9.9990666e-01 9.3292067e-05]
|
| 258 |
+
[4.6132666e-08 1.0000000e+00]]
|
| 259 |
+
Depth x Text:
|
| 260 |
+
[[0.9954276 0.00457235]
|
| 261 |
+
[0.12042473 0.8795753 ]]
|
| 262 |
+
Audio x Text:
|
| 263 |
+
[[0.97634876 0.02365119]
|
| 264 |
+
[0.02917843 0.97082156]]
|
| 265 |
+
Thermal x Text:
|
| 266 |
+
[[0.9482511 0.0517489 ]
|
| 267 |
+
[0.48746133 0.5125386 ]]
|
| 268 |
+
```
|
| 269 |
+
### Emergency zero-shot
|
| 270 |
+
Since languagebind binds each modality together, we also found the **emergency zero-shot**. It's very simple to use.
|
| 271 |
+
```python
|
| 272 |
+
print("Video x Audio: \n", torch.softmax(embeddings['video'] @ embeddings['audio'].T, dim=-1).detach().cpu().numpy())
|
| 273 |
+
print("Image x Depth: \n", torch.softmax(embeddings['image'] @ embeddings['depth'].T, dim=-1).detach().cpu().numpy())
|
| 274 |
+
print("Image x Thermal: \n", torch.softmax(embeddings['image'] @ embeddings['thermal'].T, dim=-1).detach().cpu().numpy())
|
| 275 |
+
```
|
| 276 |
+
Then, you will get:
|
| 277 |
+
```
|
| 278 |
+
Video x Audio:
|
| 279 |
+
[[1.0000000e+00 0.0000000e+00]
|
| 280 |
+
[3.1150486e-32 1.0000000e+00]]
|
| 281 |
+
Image x Depth:
|
| 282 |
+
[[1. 0.]
|
| 283 |
+
[0. 1.]]
|
| 284 |
+
Image x Thermal:
|
| 285 |
+
[[1. 0.]
|
| 286 |
+
[0. 1.]]
|
| 287 |
+
```
|
| 288 |
+
|
| 289 |
+
### Different branches for X-Language task
|
| 290 |
+
Additionally, LanguageBind can be **disassembled into different branches** to handle different tasks. Note that we do not train Image, which is just initialized from OpenCLIP.
|
| 291 |
+
#### Thermal
|
| 292 |
+
```python
|
| 293 |
+
import torch
|
| 294 |
+
from languagebind import LanguageBindThermal, LanguageBindThermalTokenizer, LanguageBindThermalProcessor
|
| 295 |
+
|
| 296 |
+
pretrained_ckpt = 'LanguageBind/LanguageBind_Thermal'
|
| 297 |
+
model = LanguageBindThermal.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir')
|
| 298 |
+
tokenizer = LanguageBindThermalTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir')
|
| 299 |
+
thermal_process = LanguageBindThermalProcessor(model.config, tokenizer)
|
| 300 |
+
|
| 301 |
+
model.eval()
|
| 302 |
+
data = thermal_process([r"your/thermal.jpg"], ['your text'], return_tensors='pt')
|
| 303 |
+
with torch.no_grad():
|
| 304 |
+
out = model(**data)
|
| 305 |
+
|
| 306 |
+
print(out.text_embeds @ out.image_embeds.T)
|
| 307 |
+
```
|
| 308 |
+
|
| 309 |
+
#### Depth
|
| 310 |
+
```python
|
| 311 |
+
import torch
|
| 312 |
+
from languagebind import LanguageBindDepth, LanguageBindDepthTokenizer, LanguageBindDepthProcessor
|
| 313 |
+
|
| 314 |
+
pretrained_ckpt = 'LanguageBind/LanguageBind_Depth'
|
| 315 |
+
model = LanguageBindDepth.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir')
|
| 316 |
+
tokenizer = LanguageBindDepthTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir')
|
| 317 |
+
depth_process = LanguageBindDepthProcessor(model.config, tokenizer)
|
| 318 |
+
|
| 319 |
+
model.eval()
|
| 320 |
+
data = depth_process([r"your/depth.png"], ['your text.'], return_tensors='pt')
|
| 321 |
+
with torch.no_grad():
|
| 322 |
+
out = model(**data)
|
| 323 |
+
|
| 324 |
+
print(out.text_embeds @ out.image_embeds.T)
|
| 325 |
+
```
|
| 326 |
+
|
| 327 |
+
#### Video
|
| 328 |
+
```python
|
| 329 |
+
import torch
|
| 330 |
+
from languagebind import LanguageBindVideo, LanguageBindVideoTokenizer, LanguageBindVideoProcessor
|
| 331 |
+
|
| 332 |
+
pretrained_ckpt = 'LanguageBind/LanguageBind_Video_FT' # also 'LanguageBind/LanguageBind_Video'
|
| 333 |
+
model = LanguageBindVideo.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir')
|
| 334 |
+
tokenizer = LanguageBindVideoTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir')
|
| 335 |
+
video_process = LanguageBindVideoProcessor(model.config, tokenizer)
|
| 336 |
+
|
| 337 |
+
model.eval()
|
| 338 |
+
data = video_process(["your/video.mp4"], ['your text.'], return_tensors='pt')
|
| 339 |
+
with torch.no_grad():
|
| 340 |
+
out = model(**data)
|
| 341 |
+
|
| 342 |
+
print(out.text_embeds @ out.image_embeds.T)
|
| 343 |
+
```
|
| 344 |
+
|
| 345 |
+
#### Audio
|
| 346 |
+
```python
|
| 347 |
+
import torch
|
| 348 |
+
from languagebind import LanguageBindAudio, LanguageBindAudioTokenizer, LanguageBindAudioProcessor
|
| 349 |
+
|
| 350 |
+
pretrained_ckpt = 'LanguageBind/LanguageBind_Audio_FT' # also 'LanguageBind/LanguageBind_Audio'
|
| 351 |
+
model = LanguageBindAudio.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir')
|
| 352 |
+
tokenizer = LanguageBindAudioTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir')
|
| 353 |
+
audio_process = LanguageBindAudioProcessor(model.config, tokenizer)
|
| 354 |
+
|
| 355 |
+
model.eval()
|
| 356 |
+
data = audio_process([r"your/audio.wav"], ['your audio.'], return_tensors='pt')
|
| 357 |
+
with torch.no_grad():
|
| 358 |
+
out = model(**data)
|
| 359 |
+
|
| 360 |
+
print(out.text_embeds @ out.image_embeds.T)
|
| 361 |
+
```
|
| 362 |
+
|
| 363 |
+
#### Image
|
| 364 |
+
Note that our image encoder is the same as OpenCLIP. **Not** as fine-tuned as other modalities.
|
| 365 |
+
```python
|
| 366 |
+
import torch
|
| 367 |
+
from languagebind import LanguageBindImage, LanguageBindImageTokenizer, LanguageBindImageProcessor
|
| 368 |
+
|
| 369 |
+
pretrained_ckpt = 'LanguageBind/LanguageBind_Image'
|
| 370 |
+
model = LanguageBindImage.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir')
|
| 371 |
+
tokenizer = LanguageBindImageTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir')
|
| 372 |
+
image_process = LanguageBindImageProcessor(model.config, tokenizer)
|
| 373 |
+
|
| 374 |
+
model.eval()
|
| 375 |
+
data = image_process([r"your/image.jpg"], ['your text.'], return_tensors='pt')
|
| 376 |
+
with torch.no_grad():
|
| 377 |
+
out = model(**data)
|
| 378 |
+
|
| 379 |
+
print(out.text_embeds @ out.image_embeds.T)
|
| 380 |
+
```
|
| 381 |
+
|
| 382 |
+
## 💥 VIDAL-10M
|
| 383 |
+
The datasets is in [DATASETS.md](DATASETS.md).
|
| 384 |
+
|
| 385 |
+
## 🗝️ Training & Validating
|
| 386 |
+
The training & validating instruction is in [TRAIN_AND_VALIDATE.md](TRAIN_AND_VALIDATE.md).
|
| 387 |
+
|
| 388 |
+
## 👍 Acknowledgement
|
| 389 |
+
* [OpenCLIP](https://github.com/mlfoundations/open_clip) An open source pretraining framework.
|
| 390 |
+
* [CLIP4Clip](https://github.com/ArrowLuo/CLIP4Clip) An open source Video-Text retrieval framework.
|
| 391 |
+
* [sRGB-TIR](https://github.com/rpmsnu/sRGB-TIR) An open source framework to generate infrared (thermal) images.
|
| 392 |
+
* [GLPN](https://github.com/vinvino02/GLPDepth) An open source framework to generate depth images.
|
| 393 |
+
|
| 394 |
+
## 🔒 License
|
| 395 |
+
* The majority of this project is released under the MIT license as found in the [LICENSE](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/LICENSE) file.
|
| 396 |
+
* The dataset of this project is released under the CC-BY-NC 4.0 license as found in the [DATASET_LICENSE](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/DATASET_LICENSE) file.
|
| 397 |
+
|
| 398 |
+
## ✏️ Citation
|
| 399 |
+
If you find our paper and code useful in your research, please consider giving a star :star: and citation :pencil:.
|
| 400 |
+
|
| 401 |
+
```BibTeX
|
| 402 |
+
@misc{zhu2023languagebind,
|
| 403 |
+
title={LanguageBind: Extending Video-Language Pretraining to N-modality by Language-based Semantic Alignment},
|
| 404 |
+
author={Bin Zhu and Bin Lin and Munan Ning and Yang Yan and Jiaxi Cui and Wang HongFa and Yatian Pang and Wenhao Jiang and Junwu Zhang and Zongwei Li and Cai Wan Zhang and Zhifeng Li and Wei Liu and Li Yuan},
|
| 405 |
+
year={2023},
|
| 406 |
+
eprint={2310.01852},
|
| 407 |
+
archivePrefix={arXiv},
|
| 408 |
+
primaryClass={cs.CV}
|
| 409 |
+
}
|
| 410 |
+
```
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
## ✨ Star History
|
| 414 |
+
|
| 415 |
+
[](https://star-history.com/#PKU-YuanGroup/LanguageBind&Date)
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
## 🤝 Contributors
|
| 419 |
+
|
| 420 |
+
<a href="https://github.com/PKU-YuanGroup/LanguageBind/graphs/contributors">
|
| 421 |
+
<img src="https://contrib.rocks/image?repo=PKU-YuanGroup/LanguageBind" />
|
| 422 |
+
</a>
|
RAG/Knowledge_Database/languagebind_main/TRAIN_AND_VALIDATE.md
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
We provide the **off-the-shelf** scripts in the [scripts folder](scripts).
|
| 2 |
+
|
| 3 |
+
## Training LanguageBind
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
<div align="center">
|
| 7 |
+
<table border="1" width="100%">
|
| 8 |
+
<tr align="center">
|
| 9 |
+
<th>Cache of pretrained weight</th><th>Baidu Yun</th><th>Google Cloud</th><th>Peking University Yun</th>
|
| 10 |
+
</tr>
|
| 11 |
+
<tr align="center">
|
| 12 |
+
<td>Large</td><td><a href="https://pan.baidu.com/s/1co46bkuUJXr8ePPKp1WWgA?pwd=ofm6">Link</a></td><td><a href="https://drive.google.com/drive/folders/1VQYZlqfKmCMuHffypf5F96odyMCEI87H?usp=drive_link">Link</a></td><td><a href="https://disk.pku.edu.cn:443/link/9CA764E6307790B01D2D4F7E314E8E43">Link</a></td>
|
| 13 |
+
</tr>
|
| 14 |
+
<tr align="center">
|
| 15 |
+
<td>Huge</td><td><a href="https://pan.baidu.com/s/1QLpyXEYunoXS-oqGsvzKKA?pwd=vgo2">Link</a></td><td>-</td><td><a href="https://disk.pku.edu.cn:443/link/720A77A7DB9EFD167C5AC8E3FC4B6068">Link</a></td>
|
| 16 |
+
</tr>
|
| 17 |
+
</table>
|
| 18 |
+
</div>
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
For example, to **train** LanguageBind on **Depth-Language** with 8 GPUs (1 nodes x 8 GPUs).
|
| 22 |
+
* First download the cache of pretrained weight above. and specify `CACHE_DIR=path/to/LanguageBind`.
|
| 23 |
+
* The second step is to develop a path to `ANNOTATION` and `DATA` [here](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/data/base_datasets.py#L37) according to the [dataset preparation](https://github.com/PKU-YuanGroup/LanguageBind#-vidal-10m).
|
| 24 |
+
* Then you can run
|
| 25 |
+
|
| 26 |
+
```bash
|
| 27 |
+
CACHE_DIR="/path/to/LanguageBind"
|
| 28 |
+
ANNOTATION="path/to/data"
|
| 29 |
+
cd /path/to/LanguageBind
|
| 30 |
+
TORCH_DISTRIBUTED_DEBUG=DETAIL HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 torchrun --nnodes=1 --nproc_per_node 8 \
|
| 31 |
+
-m main \
|
| 32 |
+
--train-data ${ANNOTATION} \
|
| 33 |
+
--train-num-samples 3020000 \
|
| 34 |
+
--clip-type "dl" --max-depth 10 \
|
| 35 |
+
--do_train \
|
| 36 |
+
--lock-text --lock-image --text-type "polish_mplug" \
|
| 37 |
+
--init-temp 0.07 --learn-temp \
|
| 38 |
+
--model "ViT-L-14" --cache-dir ${CACHE_DIR} \
|
| 39 |
+
--convert_to_lora --lora_r 2 \
|
| 40 |
+
--lr 5e-4 --coef-lr 1e-3 \
|
| 41 |
+
--beta1 0.9 --beta2 0.98 --wd 0.2 --eps 1e-6 \
|
| 42 |
+
--num-frames 1 --force-patch-dropout 0.5 \
|
| 43 |
+
--epochs 1 --batch-size 128 --accum-freq 1 --warmup 200 \
|
| 44 |
+
--precision "amp" --workers 10 --video-decode-backend "imgs" \
|
| 45 |
+
--save-frequency 1 --log-every-n-steps 20 --report-to "tensorboard" --resume "latest" \
|
| 46 |
+
--do_eval \
|
| 47 |
+
--val_d_cls_data "NYUV2"
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
## Validating LanguageBind
|
| 52 |
+
|
| 53 |
+
For example, to **validate** LanguageBind on **Depth-Language** with 1 GPUs.
|
| 54 |
+
* First specify ```RESUME```.
|
| 55 |
+
* The second step is to prepare the [downstream dataset](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/TRAIN_AND_VALIDATE.md#downstream-datasets).
|
| 56 |
+
* Then you can run
|
| 57 |
+
|
| 58 |
+
```bash
|
| 59 |
+
CACHE_DIR="/path/to/LanguageBind"
|
| 60 |
+
RESUME="thermal_language.pt"
|
| 61 |
+
ANNOTATION="path/to/data"
|
| 62 |
+
cd /path/to/LanguageBind
|
| 63 |
+
TORCH_DISTRIBUTED_DEBUG=DETAIL HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 torchrun --nproc_per_node 1 \
|
| 64 |
+
-m main \
|
| 65 |
+
--train-data ${ANNOTATION} \
|
| 66 |
+
--train-num-samples 3020000 \
|
| 67 |
+
--clip-type "dl" --max-depth 10 \
|
| 68 |
+
--lock-text --lock-image --text-type "polish_mplug" \
|
| 69 |
+
--init-temp 0.07 --learn-temp \
|
| 70 |
+
--model "ViT-L-14" --cache-dir ${CACHE_DIR} \
|
| 71 |
+
--convert_to_lora --lora_r 2 \
|
| 72 |
+
--lr 5e-4 --coef-lr 1e-3 \
|
| 73 |
+
--beta1 0.9 --beta2 0.98 --wd 0.2 --eps 1e-6 \
|
| 74 |
+
--num-frames 1 --force-patch-dropout 0.5 \
|
| 75 |
+
--epochs 1 --batch-size 128 --accum-freq 1 --warmup 200 \
|
| 76 |
+
--precision "amp" --workers 10 --video-decode-backend "imgs" \
|
| 77 |
+
--save-frequency 1 --log-every-n-steps 20 --report-to "tensorboard" --resume ${RESUME} \
|
| 78 |
+
--do_eval \
|
| 79 |
+
--val_d_cls_data "NYUV2"
|
| 80 |
+
```
|
| 81 |
+
|
| 82 |
+
## Downstream datasets
|
| 83 |
+
|
| 84 |
+
### Depth
|
| 85 |
+
NYU V2 dataset is downloaded from [this repo](https://github.com/TUI-NICR/nicr-scene-analysis-datasets/tree/main/nicr_scene_analysis_datasets/datasets/nyuv2) and we reformat them to conform to the standard ImageNet format. We also provide data as follows. Change the ```data_root``` [here](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/data/build_datasets.py#L221).
|
| 86 |
+
|
| 87 |
+
<div align="center">
|
| 88 |
+
<table border="1" width="100%">
|
| 89 |
+
<tr align="center">
|
| 90 |
+
<th>Datasets</th><th>Baidu Yun</th><th>Google Cloud</th><th>Peking University Yun</th>
|
| 91 |
+
</tr>
|
| 92 |
+
<tr align="center">
|
| 93 |
+
<td>NYU</td><td><a href="https://pan.baidu.com/s/1AGOG8U3F7W8AvJiEmuzs-A?pwd=1dsg">Link</a></td><td><a href="https://drive.google.com/file/d/1CltzrTBLFqLxJzpztSIN-5ZosZpXQQ6u/view?usp=sharing">Link</a></td><td><a href="https://disk.pku.edu.cn:443/link/7D7B164DEA64059793D3C3E3A65C0F64">Link</a></td>
|
| 94 |
+
</tr>
|
| 95 |
+
</table>
|
| 96 |
+
</div>
|
| 97 |
+
|
| 98 |
+
### Video
|
| 99 |
+
Video datasets are downloaded from [this repo](https://github.com/jpthu17/HBI) and we show the folder structure. Change the ```data_root``` [here](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/data/build_datasets.py#L74).
|
| 100 |
+
|
| 101 |
+
### Audio
|
| 102 |
+
Audio datasets are downloaded from [this repo](https://github.com/OFA-Sys/ONE-PEACE/blob/main/datasets.md#audio) and Audioset from [here](https://github.com/qiuqiangkong/audioset_tagging_cnn#1-download-dataset). We reformat them to conform to the standard ImageNet format. Change the ```data_root``` [here1](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/data/build_datasets.py#L144) and [here2](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/data/build_datasets.py#L159).
|
| 103 |
+
|
| 104 |
+
### Infrared (Thermal)
|
| 105 |
+
We download LLVIP from [official website](https://bupt-ai-cz.github.io/LLVIP/), and FLIR from [here](https://www.flir.com/oem/adas/adas-dataset-form/). We reformat them to conform to the standard ImageNet format. Change the ```data_root``` [here](https://github.com/PKU-YuanGroup/LanguageBind/blob/main/data/build_datasets.py#L233). We also provide the processed data as follows.
|
| 106 |
+
|
| 107 |
+
<div align="center">
|
| 108 |
+
<table border="1" width="100%">
|
| 109 |
+
<tr align="center">
|
| 110 |
+
<th>Datasets</th><th>Baidu Yun</th><th>Google Cloud</th><th>Peking University Yun</th>
|
| 111 |
+
</tr>
|
| 112 |
+
<tr align="center">
|
| 113 |
+
<td>LLVIP</td><td><a href="https://pan.baidu.com/s/15HPVr016F7eO9005NDRJTg?pwd=46fh">Link</a></td><td><a href="https://drive.google.com/file/d/1RfKNR8q6dHiAHB4OlYecnkUSx-ghLuEO/view?usp=drive_link">Link</a></td><td><a href="https://disk.pku.edu.cn:443/link/30D592EA37AC7C411264801A74994376">Link</a></td>
|
| 114 |
+
</tr>
|
| 115 |
+
<tr align="center">
|
| 116 |
+
<td>FLIR V1</td><td><a href="https://pan.baidu.com/s/1ZDSo5VPxJ4SA7wS_rNk0uQ?pwd=l491">Link</a></td><td><a href="https://drive.google.com/file/d/1CezCLJ4GUfPMFimitPfK40OV2j2Kr8t8/view?usp=drive_link">Link</a></td><td><a href="https://disk.pku.edu.cn:443/link/AD89D6ADE2CAC2407B00650870CBBDEC">Link</a></td>
|
| 117 |
+
</tr>
|
| 118 |
+
<tr align="center">
|
| 119 |
+
<td>FLIR V2</td><td><a href="https://pan.baidu.com/s/16xdr2aQkHo3zJ4KbaTmO3Q?pwd=tj9f">Link</a></td><td><a href="https://drive.google.com/file/d/1Z2ThG5QH-9biFI2-Z8k2fBKSA6Nrees6/view?usp=drive_link">Link</a></td><td><a href="https://disk.pku.edu.cn:443/link/E06C010970B0ED51926700D2F7A21EA8">Link</a></td>
|
| 120 |
+
</tr>
|
| 121 |
+
</table>
|
| 122 |
+
</div>
|
| 123 |
+
|
| 124 |
+
### Folder structure
|
| 125 |
+
```bash
|
| 126 |
+
downstream_datasets
|
| 127 |
+
├── Audio
|
| 128 |
+
│ ├── audiocaps
|
| 129 |
+
│ │ └── audio
|
| 130 |
+
│ │ ├── test
|
| 131 |
+
│ │ ├── train
|
| 132 |
+
│ │ └── val
|
| 133 |
+
│ ├── audioset
|
| 134 |
+
│ │ ├── balanced_train_segments
|
| 135 |
+
│ │ ├── eval_segments
|
| 136 |
+
│ │ └── unbalanced_train_segments
|
| 137 |
+
│ │ ├── unbalanced_train_segments_part00
|
| 138 |
+
│ │ ├── unbalanced_train_segments_part01
|
| 139 |
+
│ │ ├── ...
|
| 140 |
+
│ │ └── unbalanced_train_segments_part40
|
| 141 |
+
│ ├── clotho
|
| 142 |
+
│ │ ├── CLOTHO_retrieval_dataset
|
| 143 |
+
│ │ └── evaluation
|
| 144 |
+
│ ├── esc50
|
| 145 |
+
│ │ └── test
|
| 146 |
+
│ │ ├── airplane
|
| 147 |
+
│ │ ├── breathing
|
| 148 |
+
│ │ ├── ...
|
| 149 |
+
│ │ └── wind
|
| 150 |
+
├── laionaudio
|
| 151 |
+
│ │ ├── audios
|
| 152 |
+
│ │ ├── freesound_no_overlap
|
| 153 |
+
│ │ └── jsons
|
| 154 |
+
├── vggsound
|
| 155 |
+
│ └── test
|
| 156 |
+
│ ├── air\ conditioning\ noise
|
| 157 |
+
│ ├── air\ horn
|
| 158 |
+
│ ├── ...
|
| 159 |
+
│ └── zebra\ braying
|
| 160 |
+
├── Depth
|
| 161 |
+
│ ├── nyuv2
|
| 162 |
+
│ │ ├── data
|
| 163 |
+
│ │ │ └── val
|
| 164 |
+
│ │ │ ├── bathroom
|
| 165 |
+
│ │ │ ├── bedroom
|
| 166 |
+
│ │ │ ├── bookstore
|
| 167 |
+
│ │ │ ├── classroom
|
| 168 |
+
│ │ │ ├── dining_room
|
| 169 |
+
│ │ │ ├── home_office
|
| 170 |
+
│ │ │ ├── kitchen
|
| 171 |
+
│ │ │ ├── living_room
|
| 172 |
+
│ │ │ ├── office
|
| 173 |
+
│ │ │ └── others
|
| 174 |
+
├── Thermal
|
| 175 |
+
│ ├── flirv1
|
| 176 |
+
│ │ └── val
|
| 177 |
+
│ │ ├── bicycle
|
| 178 |
+
│ │ ├── car
|
| 179 |
+
│ │ ├── dog
|
| 180 |
+
│ │ └── person
|
| 181 |
+
│ ├── flirv2
|
| 182 |
+
│ │ └── val
|
| 183 |
+
│ │ ├── bike
|
| 184 |
+
│ │ ├── bus
|
| 185 |
+
│ │ ├── car
|
| 186 |
+
│ │ ├── hydrant
|
| 187 |
+
│ │ ├── light
|
| 188 |
+
│ │ ├── motor
|
| 189 |
+
│ │ ├── other\ vehicle
|
| 190 |
+
│ │ ├── person
|
| 191 |
+
│ │ ├── sign
|
| 192 |
+
│ │ ├── skateboard
|
| 193 |
+
│ │ ├── stroller
|
| 194 |
+
│ │ └── truck
|
| 195 |
+
│ ├── llvip
|
| 196 |
+
│ │ ├── train
|
| 197 |
+
│ │ │ ├── background
|
| 198 |
+
│ │ │ └── person
|
| 199 |
+
│ │ └── val
|
| 200 |
+
│ │ ├── background
|
| 201 |
+
│ │ └── person
|
| 202 |
+
└── VideoTextRetrieval
|
| 203 |
+
├── vtRetdata
|
| 204 |
+
│ ├── ActivityNet
|
| 205 |
+
│ │ └── Videos
|
| 206 |
+
│ │ └── Activity_Videos
|
| 207 |
+
│ ├── Didemo
|
| 208 |
+
│ │ └── videos
|
| 209 |
+
│ ├── MSRVTT
|
| 210 |
+
│ │ └── MSRVTT_Videos
|
| 211 |
+
│ └── MSVD
|
| 212 |
+
│     └── MSVD_Videos
|
| 213 |
+
```
|
| 214 |
+
|
RAG/Knowledge_Database/languagebind_main/a_cls/class_labels_indices.csv
ADDED
|
@@ -0,0 +1,528 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
index,mid,display_name
|
| 2 |
+
0,/m/09x0r,"Speech"
|
| 3 |
+
1,/m/05zppz,"Male speech, man speaking"
|
| 4 |
+
2,/m/02zsn,"Female speech, woman speaking"
|
| 5 |
+
3,/m/0ytgt,"Child speech, kid speaking"
|
| 6 |
+
4,/m/01h8n0,"Conversation"
|
| 7 |
+
5,/m/02qldy,"Narration, monologue"
|
| 8 |
+
6,/m/0261r1,"Babbling"
|
| 9 |
+
7,/m/0brhx,"Speech synthesizer"
|
| 10 |
+
8,/m/07p6fty,"Shout"
|
| 11 |
+
9,/m/07q4ntr,"Bellow"
|
| 12 |
+
10,/m/07rwj3x,"Whoop"
|
| 13 |
+
11,/m/07sr1lc,"Yell"
|
| 14 |
+
12,/m/04gy_2,"Battle cry"
|
| 15 |
+
13,/t/dd00135,"Children shouting"
|
| 16 |
+
14,/m/03qc9zr,"Screaming"
|
| 17 |
+
15,/m/02rtxlg,"Whispering"
|
| 18 |
+
16,/m/01j3sz,"Laughter"
|
| 19 |
+
17,/t/dd00001,"Baby laughter"
|
| 20 |
+
18,/m/07r660_,"Giggle"
|
| 21 |
+
19,/m/07s04w4,"Snicker"
|
| 22 |
+
20,/m/07sq110,"Belly laugh"
|
| 23 |
+
21,/m/07rgt08,"Chuckle, chortle"
|
| 24 |
+
22,/m/0463cq4,"Crying, sobbing"
|
| 25 |
+
23,/t/dd00002,"Baby cry, infant cry"
|
| 26 |
+
24,/m/07qz6j3,"Whimper"
|
| 27 |
+
25,/m/07qw_06,"Wail, moan"
|
| 28 |
+
26,/m/07plz5l,"Sigh"
|
| 29 |
+
27,/m/015lz1,"Singing"
|
| 30 |
+
28,/m/0l14jd,"Choir"
|
| 31 |
+
29,/m/01swy6,"Yodeling"
|
| 32 |
+
30,/m/02bk07,"Chant"
|
| 33 |
+
31,/m/01c194,"Mantra"
|
| 34 |
+
32,/t/dd00003,"Male singing"
|
| 35 |
+
33,/t/dd00004,"Female singing"
|
| 36 |
+
34,/t/dd00005,"Child singing"
|
| 37 |
+
35,/t/dd00006,"Synthetic singing"
|
| 38 |
+
36,/m/06bxc,"Rapping"
|
| 39 |
+
37,/m/02fxyj,"Humming"
|
| 40 |
+
38,/m/07s2xch,"Groan"
|
| 41 |
+
39,/m/07r4k75,"Grunt"
|
| 42 |
+
40,/m/01w250,"Whistling"
|
| 43 |
+
41,/m/0lyf6,"Breathing"
|
| 44 |
+
42,/m/07mzm6,"Wheeze"
|
| 45 |
+
43,/m/01d3sd,"Snoring"
|
| 46 |
+
44,/m/07s0dtb,"Gasp"
|
| 47 |
+
45,/m/07pyy8b,"Pant"
|
| 48 |
+
46,/m/07q0yl5,"Snort"
|
| 49 |
+
47,/m/01b_21,"Cough"
|
| 50 |
+
48,/m/0dl9sf8,"Throat clearing"
|
| 51 |
+
49,/m/01hsr_,"Sneeze"
|
| 52 |
+
50,/m/07ppn3j,"Sniff"
|
| 53 |
+
51,/m/06h7j,"Run"
|
| 54 |
+
52,/m/07qv_x_,"Shuffle"
|
| 55 |
+
53,/m/07pbtc8,"Walk, footsteps"
|
| 56 |
+
54,/m/03cczk,"Chewing, mastication"
|
| 57 |
+
55,/m/07pdhp0,"Biting"
|
| 58 |
+
56,/m/0939n_,"Gargling"
|
| 59 |
+
57,/m/01g90h,"Stomach rumble"
|
| 60 |
+
58,/m/03q5_w,"Burping, eructation"
|
| 61 |
+
59,/m/02p3nc,"Hiccup"
|
| 62 |
+
60,/m/02_nn,"Fart"
|
| 63 |
+
61,/m/0k65p,"Hands"
|
| 64 |
+
62,/m/025_jnm,"Finger snapping"
|
| 65 |
+
63,/m/0l15bq,"Clapping"
|
| 66 |
+
64,/m/01jg02,"Heart sounds, heartbeat"
|
| 67 |
+
65,/m/01jg1z,"Heart murmur"
|
| 68 |
+
66,/m/053hz1,"Cheering"
|
| 69 |
+
67,/m/028ght,"Applause"
|
| 70 |
+
68,/m/07rkbfh,"Chatter"
|
| 71 |
+
69,/m/03qtwd,"Crowd"
|
| 72 |
+
70,/m/07qfr4h,"Hubbub, speech noise, speech babble"
|
| 73 |
+
71,/t/dd00013,"Children playing"
|
| 74 |
+
72,/m/0jbk,"Animal"
|
| 75 |
+
73,/m/068hy,"Domestic animals, pets"
|
| 76 |
+
74,/m/0bt9lr,"Dog"
|
| 77 |
+
75,/m/05tny_,"Bark"
|
| 78 |
+
76,/m/07r_k2n,"Yip"
|
| 79 |
+
77,/m/07qf0zm,"Howl"
|
| 80 |
+
78,/m/07rc7d9,"Bow-wow"
|
| 81 |
+
79,/m/0ghcn6,"Growling"
|
| 82 |
+
80,/t/dd00136,"Whimper (dog)"
|
| 83 |
+
81,/m/01yrx,"Cat"
|
| 84 |
+
82,/m/02yds9,"Purr"
|
| 85 |
+
83,/m/07qrkrw,"Meow"
|
| 86 |
+
84,/m/07rjwbb,"Hiss"
|
| 87 |
+
85,/m/07r81j2,"Caterwaul"
|
| 88 |
+
86,/m/0ch8v,"Livestock, farm animals, working animals"
|
| 89 |
+
87,/m/03k3r,"Horse"
|
| 90 |
+
88,/m/07rv9rh,"Clip-clop"
|
| 91 |
+
89,/m/07q5rw0,"Neigh, whinny"
|
| 92 |
+
90,/m/01xq0k1,"Cattle, bovinae"
|
| 93 |
+
91,/m/07rpkh9,"Moo"
|
| 94 |
+
92,/m/0239kh,"Cowbell"
|
| 95 |
+
93,/m/068zj,"Pig"
|
| 96 |
+
94,/t/dd00018,"Oink"
|
| 97 |
+
95,/m/03fwl,"Goat"
|
| 98 |
+
96,/m/07q0h5t,"Bleat"
|
| 99 |
+
97,/m/07bgp,"Sheep"
|
| 100 |
+
98,/m/025rv6n,"Fowl"
|
| 101 |
+
99,/m/09b5t,"Chicken, rooster"
|
| 102 |
+
100,/m/07st89h,"Cluck"
|
| 103 |
+
101,/m/07qn5dc,"Crowing, cock-a-doodle-doo"
|
| 104 |
+
102,/m/01rd7k,"Turkey"
|
| 105 |
+
103,/m/07svc2k,"Gobble"
|
| 106 |
+
104,/m/09ddx,"Duck"
|
| 107 |
+
105,/m/07qdb04,"Quack"
|
| 108 |
+
106,/m/0dbvp,"Goose"
|
| 109 |
+
107,/m/07qwf61,"Honk"
|
| 110 |
+
108,/m/01280g,"Wild animals"
|
| 111 |
+
109,/m/0cdnk,"Roaring cats (lions, tigers)"
|
| 112 |
+
110,/m/04cvmfc,"Roar"
|
| 113 |
+
111,/m/015p6,"Bird"
|
| 114 |
+
112,/m/020bb7,"Bird vocalization, bird call, bird song"
|
| 115 |
+
113,/m/07pggtn,"Chirp, tweet"
|
| 116 |
+
114,/m/07sx8x_,"Squawk"
|
| 117 |
+
115,/m/0h0rv,"Pigeon, dove"
|
| 118 |
+
116,/m/07r_25d,"Coo"
|
| 119 |
+
117,/m/04s8yn,"Crow"
|
| 120 |
+
118,/m/07r5c2p,"Caw"
|
| 121 |
+
119,/m/09d5_,"Owl"
|
| 122 |
+
120,/m/07r_80w,"Hoot"
|
| 123 |
+
121,/m/05_wcq,"Bird flight, flapping wings"
|
| 124 |
+
122,/m/01z5f,"Canidae, dogs, wolves"
|
| 125 |
+
123,/m/06hps,"Rodents, rats, mice"
|
| 126 |
+
124,/m/04rmv,"Mouse"
|
| 127 |
+
125,/m/07r4gkf,"Patter"
|
| 128 |
+
126,/m/03vt0,"Insect"
|
| 129 |
+
127,/m/09xqv,"Cricket"
|
| 130 |
+
128,/m/09f96,"Mosquito"
|
| 131 |
+
129,/m/0h2mp,"Fly, housefly"
|
| 132 |
+
130,/m/07pjwq1,"Buzz"
|
| 133 |
+
131,/m/01h3n,"Bee, wasp, etc."
|
| 134 |
+
132,/m/09ld4,"Frog"
|
| 135 |
+
133,/m/07st88b,"Croak"
|
| 136 |
+
134,/m/078jl,"Snake"
|
| 137 |
+
135,/m/07qn4z3,"Rattle"
|
| 138 |
+
136,/m/032n05,"Whale vocalization"
|
| 139 |
+
137,/m/04rlf,"Music"
|
| 140 |
+
138,/m/04szw,"Musical instrument"
|
| 141 |
+
139,/m/0fx80y,"Plucked string instrument"
|
| 142 |
+
140,/m/0342h,"Guitar"
|
| 143 |
+
141,/m/02sgy,"Electric guitar"
|
| 144 |
+
142,/m/018vs,"Bass guitar"
|
| 145 |
+
143,/m/042v_gx,"Acoustic guitar"
|
| 146 |
+
144,/m/06w87,"Steel guitar, slide guitar"
|
| 147 |
+
145,/m/01glhc,"Tapping (guitar technique)"
|
| 148 |
+
146,/m/07s0s5r,"Strum"
|
| 149 |
+
147,/m/018j2,"Banjo"
|
| 150 |
+
148,/m/0jtg0,"Sitar"
|
| 151 |
+
149,/m/04rzd,"Mandolin"
|
| 152 |
+
150,/m/01bns_,"Zither"
|
| 153 |
+
151,/m/07xzm,"Ukulele"
|
| 154 |
+
152,/m/05148p4,"Keyboard (musical)"
|
| 155 |
+
153,/m/05r5c,"Piano"
|
| 156 |
+
154,/m/01s0ps,"Electric piano"
|
| 157 |
+
155,/m/013y1f,"Organ"
|
| 158 |
+
156,/m/03xq_f,"Electronic organ"
|
| 159 |
+
157,/m/03gvt,"Hammond organ"
|
| 160 |
+
158,/m/0l14qv,"Synthesizer"
|
| 161 |
+
159,/m/01v1d8,"Sampler"
|
| 162 |
+
160,/m/03q5t,"Harpsichord"
|
| 163 |
+
161,/m/0l14md,"Percussion"
|
| 164 |
+
162,/m/02hnl,"Drum kit"
|
| 165 |
+
163,/m/0cfdd,"Drum machine"
|
| 166 |
+
164,/m/026t6,"Drum"
|
| 167 |
+
165,/m/06rvn,"Snare drum"
|
| 168 |
+
166,/m/03t3fj,"Rimshot"
|
| 169 |
+
167,/m/02k_mr,"Drum roll"
|
| 170 |
+
168,/m/0bm02,"Bass drum"
|
| 171 |
+
169,/m/011k_j,"Timpani"
|
| 172 |
+
170,/m/01p970,"Tabla"
|
| 173 |
+
171,/m/01qbl,"Cymbal"
|
| 174 |
+
172,/m/03qtq,"Hi-hat"
|
| 175 |
+
173,/m/01sm1g,"Wood block"
|
| 176 |
+
174,/m/07brj,"Tambourine"
|
| 177 |
+
175,/m/05r5wn,"Rattle (instrument)"
|
| 178 |
+
176,/m/0xzly,"Maraca"
|
| 179 |
+
177,/m/0mbct,"Gong"
|
| 180 |
+
178,/m/016622,"Tubular bells"
|
| 181 |
+
179,/m/0j45pbj,"Mallet percussion"
|
| 182 |
+
180,/m/0dwsp,"Marimba, xylophone"
|
| 183 |
+
181,/m/0dwtp,"Glockenspiel"
|
| 184 |
+
182,/m/0dwt5,"Vibraphone"
|
| 185 |
+
183,/m/0l156b,"Steelpan"
|
| 186 |
+
184,/m/05pd6,"Orchestra"
|
| 187 |
+
185,/m/01kcd,"Brass instrument"
|
| 188 |
+
186,/m/0319l,"French horn"
|
| 189 |
+
187,/m/07gql,"Trumpet"
|
| 190 |
+
188,/m/07c6l,"Trombone"
|
| 191 |
+
189,/m/0l14_3,"Bowed string instrument"
|
| 192 |
+
190,/m/02qmj0d,"String section"
|
| 193 |
+
191,/m/07y_7,"Violin, fiddle"
|
| 194 |
+
192,/m/0d8_n,"Pizzicato"
|
| 195 |
+
193,/m/01xqw,"Cello"
|
| 196 |
+
194,/m/02fsn,"Double bass"
|
| 197 |
+
195,/m/085jw,"Wind instrument, woodwind instrument"
|
| 198 |
+
196,/m/0l14j_,"Flute"
|
| 199 |
+
197,/m/06ncr,"Saxophone"
|
| 200 |
+
198,/m/01wy6,"Clarinet"
|
| 201 |
+
199,/m/03m5k,"Harp"
|
| 202 |
+
200,/m/0395lw,"Bell"
|
| 203 |
+
201,/m/03w41f,"Church bell"
|
| 204 |
+
202,/m/027m70_,"Jingle bell"
|
| 205 |
+
203,/m/0gy1t2s,"Bicycle bell"
|
| 206 |
+
204,/m/07n_g,"Tuning fork"
|
| 207 |
+
205,/m/0f8s22,"Chime"
|
| 208 |
+
206,/m/026fgl,"Wind chime"
|
| 209 |
+
207,/m/0150b9,"Change ringing (campanology)"
|
| 210 |
+
208,/m/03qjg,"Harmonica"
|
| 211 |
+
209,/m/0mkg,"Accordion"
|
| 212 |
+
210,/m/0192l,"Bagpipes"
|
| 213 |
+
211,/m/02bxd,"Didgeridoo"
|
| 214 |
+
212,/m/0l14l2,"Shofar"
|
| 215 |
+
213,/m/07kc_,"Theremin"
|
| 216 |
+
214,/m/0l14t7,"Singing bowl"
|
| 217 |
+
215,/m/01hgjl,"Scratching (performance technique)"
|
| 218 |
+
216,/m/064t9,"Pop music"
|
| 219 |
+
217,/m/0glt670,"Hip hop music"
|
| 220 |
+
218,/m/02cz_7,"Beatboxing"
|
| 221 |
+
219,/m/06by7,"Rock music"
|
| 222 |
+
220,/m/03lty,"Heavy metal"
|
| 223 |
+
221,/m/05r6t,"Punk rock"
|
| 224 |
+
222,/m/0dls3,"Grunge"
|
| 225 |
+
223,/m/0dl5d,"Progressive rock"
|
| 226 |
+
224,/m/07sbbz2,"Rock and roll"
|
| 227 |
+
225,/m/05w3f,"Psychedelic rock"
|
| 228 |
+
226,/m/06j6l,"Rhythm and blues"
|
| 229 |
+
227,/m/0gywn,"Soul music"
|
| 230 |
+
228,/m/06cqb,"Reggae"
|
| 231 |
+
229,/m/01lyv,"Country"
|
| 232 |
+
230,/m/015y_n,"Swing music"
|
| 233 |
+
231,/m/0gg8l,"Bluegrass"
|
| 234 |
+
232,/m/02x8m,"Funk"
|
| 235 |
+
233,/m/02w4v,"Folk music"
|
| 236 |
+
234,/m/06j64v,"Middle Eastern music"
|
| 237 |
+
235,/m/03_d0,"Jazz"
|
| 238 |
+
236,/m/026z9,"Disco"
|
| 239 |
+
237,/m/0ggq0m,"Classical music"
|
| 240 |
+
238,/m/05lls,"Opera"
|
| 241 |
+
239,/m/02lkt,"Electronic music"
|
| 242 |
+
240,/m/03mb9,"House music"
|
| 243 |
+
241,/m/07gxw,"Techno"
|
| 244 |
+
242,/m/07s72n,"Dubstep"
|
| 245 |
+
243,/m/0283d,"Drum and bass"
|
| 246 |
+
244,/m/0m0jc,"Electronica"
|
| 247 |
+
245,/m/08cyft,"Electronic dance music"
|
| 248 |
+
246,/m/0fd3y,"Ambient music"
|
| 249 |
+
247,/m/07lnk,"Trance music"
|
| 250 |
+
248,/m/0g293,"Music of Latin America"
|
| 251 |
+
249,/m/0ln16,"Salsa music"
|
| 252 |
+
250,/m/0326g,"Flamenco"
|
| 253 |
+
251,/m/0155w,"Blues"
|
| 254 |
+
252,/m/05fw6t,"Music for children"
|
| 255 |
+
253,/m/02v2lh,"New-age music"
|
| 256 |
+
254,/m/0y4f8,"Vocal music"
|
| 257 |
+
255,/m/0z9c,"A capella"
|
| 258 |
+
256,/m/0164x2,"Music of Africa"
|
| 259 |
+
257,/m/0145m,"Afrobeat"
|
| 260 |
+
258,/m/02mscn,"Christian music"
|
| 261 |
+
259,/m/016cjb,"Gospel music"
|
| 262 |
+
260,/m/028sqc,"Music of Asia"
|
| 263 |
+
261,/m/015vgc,"Carnatic music"
|
| 264 |
+
262,/m/0dq0md,"Music of Bollywood"
|
| 265 |
+
263,/m/06rqw,"Ska"
|
| 266 |
+
264,/m/02p0sh1,"Traditional music"
|
| 267 |
+
265,/m/05rwpb,"Independent music"
|
| 268 |
+
266,/m/074ft,"Song"
|
| 269 |
+
267,/m/025td0t,"Background music"
|
| 270 |
+
268,/m/02cjck,"Theme music"
|
| 271 |
+
269,/m/03r5q_,"Jingle (music)"
|
| 272 |
+
270,/m/0l14gg,"Soundtrack music"
|
| 273 |
+
271,/m/07pkxdp,"Lullaby"
|
| 274 |
+
272,/m/01z7dr,"Video game music"
|
| 275 |
+
273,/m/0140xf,"Christmas music"
|
| 276 |
+
274,/m/0ggx5q,"Dance music"
|
| 277 |
+
275,/m/04wptg,"Wedding music"
|
| 278 |
+
276,/t/dd00031,"Happy music"
|
| 279 |
+
277,/t/dd00032,"Funny music"
|
| 280 |
+
278,/t/dd00033,"Sad music"
|
| 281 |
+
279,/t/dd00034,"Tender music"
|
| 282 |
+
280,/t/dd00035,"Exciting music"
|
| 283 |
+
281,/t/dd00036,"Angry music"
|
| 284 |
+
282,/t/dd00037,"Scary music"
|
| 285 |
+
283,/m/03m9d0z,"Wind"
|
| 286 |
+
284,/m/09t49,"Rustling leaves"
|
| 287 |
+
285,/t/dd00092,"Wind noise (microphone)"
|
| 288 |
+
286,/m/0jb2l,"Thunderstorm"
|
| 289 |
+
287,/m/0ngt1,"Thunder"
|
| 290 |
+
288,/m/0838f,"Water"
|
| 291 |
+
289,/m/06mb1,"Rain"
|
| 292 |
+
290,/m/07r10fb,"Raindrop"
|
| 293 |
+
291,/t/dd00038,"Rain on surface"
|
| 294 |
+
292,/m/0j6m2,"Stream"
|
| 295 |
+
293,/m/0j2kx,"Waterfall"
|
| 296 |
+
294,/m/05kq4,"Ocean"
|
| 297 |
+
295,/m/034srq,"Waves, surf"
|
| 298 |
+
296,/m/06wzb,"Steam"
|
| 299 |
+
297,/m/07swgks,"Gurgling"
|
| 300 |
+
298,/m/02_41,"Fire"
|
| 301 |
+
299,/m/07pzfmf,"Crackle"
|
| 302 |
+
300,/m/07yv9,"Vehicle"
|
| 303 |
+
301,/m/019jd,"Boat, Water vehicle"
|
| 304 |
+
302,/m/0hsrw,"Sailboat, sailing ship"
|
| 305 |
+
303,/m/056ks2,"Rowboat, canoe, kayak"
|
| 306 |
+
304,/m/02rlv9,"Motorboat, speedboat"
|
| 307 |
+
305,/m/06q74,"Ship"
|
| 308 |
+
306,/m/012f08,"Motor vehicle (road)"
|
| 309 |
+
307,/m/0k4j,"Car"
|
| 310 |
+
308,/m/0912c9,"Vehicle horn, car horn, honking"
|
| 311 |
+
309,/m/07qv_d5,"Toot"
|
| 312 |
+
310,/m/02mfyn,"Car alarm"
|
| 313 |
+
311,/m/04gxbd,"Power windows, electric windows"
|
| 314 |
+
312,/m/07rknqz,"Skidding"
|
| 315 |
+
313,/m/0h9mv,"Tire squeal"
|
| 316 |
+
314,/t/dd00134,"Car passing by"
|
| 317 |
+
315,/m/0ltv,"Race car, auto racing"
|
| 318 |
+
316,/m/07r04,"Truck"
|
| 319 |
+
317,/m/0gvgw0,"Air brake"
|
| 320 |
+
318,/m/05x_td,"Air horn, truck horn"
|
| 321 |
+
319,/m/02rhddq,"Reversing beeps"
|
| 322 |
+
320,/m/03cl9h,"Ice cream truck, ice cream van"
|
| 323 |
+
321,/m/01bjv,"Bus"
|
| 324 |
+
322,/m/03j1ly,"Emergency vehicle"
|
| 325 |
+
323,/m/04qvtq,"Police car (siren)"
|
| 326 |
+
324,/m/012n7d,"Ambulance (siren)"
|
| 327 |
+
325,/m/012ndj,"Fire engine, fire truck (siren)"
|
| 328 |
+
326,/m/04_sv,"Motorcycle"
|
| 329 |
+
327,/m/0btp2,"Traffic noise, roadway noise"
|
| 330 |
+
328,/m/06d_3,"Rail transport"
|
| 331 |
+
329,/m/07jdr,"Train"
|
| 332 |
+
330,/m/04zmvq,"Train whistle"
|
| 333 |
+
331,/m/0284vy3,"Train horn"
|
| 334 |
+
332,/m/01g50p,"Railroad car, train wagon"
|
| 335 |
+
333,/t/dd00048,"Train wheels squealing"
|
| 336 |
+
334,/m/0195fx,"Subway, metro, underground"
|
| 337 |
+
335,/m/0k5j,"Aircraft"
|
| 338 |
+
336,/m/014yck,"Aircraft engine"
|
| 339 |
+
337,/m/04229,"Jet engine"
|
| 340 |
+
338,/m/02l6bg,"Propeller, airscrew"
|
| 341 |
+
339,/m/09ct_,"Helicopter"
|
| 342 |
+
340,/m/0cmf2,"Fixed-wing aircraft, airplane"
|
| 343 |
+
341,/m/0199g,"Bicycle"
|
| 344 |
+
342,/m/06_fw,"Skateboard"
|
| 345 |
+
343,/m/02mk9,"Engine"
|
| 346 |
+
344,/t/dd00065,"Light engine (high frequency)"
|
| 347 |
+
345,/m/08j51y,"Dental drill, dentist's drill"
|
| 348 |
+
346,/m/01yg9g,"Lawn mower"
|
| 349 |
+
347,/m/01j4z9,"Chainsaw"
|
| 350 |
+
348,/t/dd00066,"Medium engine (mid frequency)"
|
| 351 |
+
349,/t/dd00067,"Heavy engine (low frequency)"
|
| 352 |
+
350,/m/01h82_,"Engine knocking"
|
| 353 |
+
351,/t/dd00130,"Engine starting"
|
| 354 |
+
352,/m/07pb8fc,"Idling"
|
| 355 |
+
353,/m/07q2z82,"Accelerating, revving, vroom"
|
| 356 |
+
354,/m/02dgv,"Door"
|
| 357 |
+
355,/m/03wwcy,"Doorbell"
|
| 358 |
+
356,/m/07r67yg,"Ding-dong"
|
| 359 |
+
357,/m/02y_763,"Sliding door"
|
| 360 |
+
358,/m/07rjzl8,"Slam"
|
| 361 |
+
359,/m/07r4wb8,"Knock"
|
| 362 |
+
360,/m/07qcpgn,"Tap"
|
| 363 |
+
361,/m/07q6cd_,"Squeak"
|
| 364 |
+
362,/m/0642b4,"Cupboard open or close"
|
| 365 |
+
363,/m/0fqfqc,"Drawer open or close"
|
| 366 |
+
364,/m/04brg2,"Dishes, pots, and pans"
|
| 367 |
+
365,/m/023pjk,"Cutlery, silverware"
|
| 368 |
+
366,/m/07pn_8q,"Chopping (food)"
|
| 369 |
+
367,/m/0dxrf,"Frying (food)"
|
| 370 |
+
368,/m/0fx9l,"Microwave oven"
|
| 371 |
+
369,/m/02pjr4,"Blender"
|
| 372 |
+
370,/m/02jz0l,"Water tap, faucet"
|
| 373 |
+
371,/m/0130jx,"Sink (filling or washing)"
|
| 374 |
+
372,/m/03dnzn,"Bathtub (filling or washing)"
|
| 375 |
+
373,/m/03wvsk,"Hair dryer"
|
| 376 |
+
374,/m/01jt3m,"Toilet flush"
|
| 377 |
+
375,/m/012xff,"Toothbrush"
|
| 378 |
+
376,/m/04fgwm,"Electric toothbrush"
|
| 379 |
+
377,/m/0d31p,"Vacuum cleaner"
|
| 380 |
+
378,/m/01s0vc,"Zipper (clothing)"
|
| 381 |
+
379,/m/03v3yw,"Keys jangling"
|
| 382 |
+
380,/m/0242l,"Coin (dropping)"
|
| 383 |
+
381,/m/01lsmm,"Scissors"
|
| 384 |
+
382,/m/02g901,"Electric shaver, electric razor"
|
| 385 |
+
383,/m/05rj2,"Shuffling cards"
|
| 386 |
+
384,/m/0316dw,"Typing"
|
| 387 |
+
385,/m/0c2wf,"Typewriter"
|
| 388 |
+
386,/m/01m2v,"Computer keyboard"
|
| 389 |
+
387,/m/081rb,"Writing"
|
| 390 |
+
388,/m/07pp_mv,"Alarm"
|
| 391 |
+
389,/m/07cx4,"Telephone"
|
| 392 |
+
390,/m/07pp8cl,"Telephone bell ringing"
|
| 393 |
+
391,/m/01hnzm,"Ringtone"
|
| 394 |
+
392,/m/02c8p,"Telephone dialing, DTMF"
|
| 395 |
+
393,/m/015jpf,"Dial tone"
|
| 396 |
+
394,/m/01z47d,"Busy signal"
|
| 397 |
+
395,/m/046dlr,"Alarm clock"
|
| 398 |
+
396,/m/03kmc9,"Siren"
|
| 399 |
+
397,/m/0dgbq,"Civil defense siren"
|
| 400 |
+
398,/m/030rvx,"Buzzer"
|
| 401 |
+
399,/m/01y3hg,"Smoke detector, smoke alarm"
|
| 402 |
+
400,/m/0c3f7m,"Fire alarm"
|
| 403 |
+
401,/m/04fq5q,"Foghorn"
|
| 404 |
+
402,/m/0l156k,"Whistle"
|
| 405 |
+
403,/m/06hck5,"Steam whistle"
|
| 406 |
+
404,/t/dd00077,"Mechanisms"
|
| 407 |
+
405,/m/02bm9n,"Ratchet, pawl"
|
| 408 |
+
406,/m/01x3z,"Clock"
|
| 409 |
+
407,/m/07qjznt,"Tick"
|
| 410 |
+
408,/m/07qjznl,"Tick-tock"
|
| 411 |
+
409,/m/0l7xg,"Gears"
|
| 412 |
+
410,/m/05zc1,"Pulleys"
|
| 413 |
+
411,/m/0llzx,"Sewing machine"
|
| 414 |
+
412,/m/02x984l,"Mechanical fan"
|
| 415 |
+
413,/m/025wky1,"Air conditioning"
|
| 416 |
+
414,/m/024dl,"Cash register"
|
| 417 |
+
415,/m/01m4t,"Printer"
|
| 418 |
+
416,/m/0dv5r,"Camera"
|
| 419 |
+
417,/m/07bjf,"Single-lens reflex camera"
|
| 420 |
+
418,/m/07k1x,"Tools"
|
| 421 |
+
419,/m/03l9g,"Hammer"
|
| 422 |
+
420,/m/03p19w,"Jackhammer"
|
| 423 |
+
421,/m/01b82r,"Sawing"
|
| 424 |
+
422,/m/02p01q,"Filing (rasp)"
|
| 425 |
+
423,/m/023vsd,"Sanding"
|
| 426 |
+
424,/m/0_ksk,"Power tool"
|
| 427 |
+
425,/m/01d380,"Drill"
|
| 428 |
+
426,/m/014zdl,"Explosion"
|
| 429 |
+
427,/m/032s66,"Gunshot, gunfire"
|
| 430 |
+
428,/m/04zjc,"Machine gun"
|
| 431 |
+
429,/m/02z32qm,"Fusillade"
|
| 432 |
+
430,/m/0_1c,"Artillery fire"
|
| 433 |
+
431,/m/073cg4,"Cap gun"
|
| 434 |
+
432,/m/0g6b5,"Fireworks"
|
| 435 |
+
433,/g/122z_qxw,"Firecracker"
|
| 436 |
+
434,/m/07qsvvw,"Burst, pop"
|
| 437 |
+
435,/m/07pxg6y,"Eruption"
|
| 438 |
+
436,/m/07qqyl4,"Boom"
|
| 439 |
+
437,/m/083vt,"Wood"
|
| 440 |
+
438,/m/07pczhz,"Chop"
|
| 441 |
+
439,/m/07pl1bw,"Splinter"
|
| 442 |
+
440,/m/07qs1cx,"Crack"
|
| 443 |
+
441,/m/039jq,"Glass"
|
| 444 |
+
442,/m/07q7njn,"Chink, clink"
|
| 445 |
+
443,/m/07rn7sz,"Shatter"
|
| 446 |
+
444,/m/04k94,"Liquid"
|
| 447 |
+
445,/m/07rrlb6,"Splash, splatter"
|
| 448 |
+
446,/m/07p6mqd,"Slosh"
|
| 449 |
+
447,/m/07qlwh6,"Squish"
|
| 450 |
+
448,/m/07r5v4s,"Drip"
|
| 451 |
+
449,/m/07prgkl,"Pour"
|
| 452 |
+
450,/m/07pqc89,"Trickle, dribble"
|
| 453 |
+
451,/t/dd00088,"Gush"
|
| 454 |
+
452,/m/07p7b8y,"Fill (with liquid)"
|
| 455 |
+
453,/m/07qlf79,"Spray"
|
| 456 |
+
454,/m/07ptzwd,"Pump (liquid)"
|
| 457 |
+
455,/m/07ptfmf,"Stir"
|
| 458 |
+
456,/m/0dv3j,"Boiling"
|
| 459 |
+
457,/m/0790c,"Sonar"
|
| 460 |
+
458,/m/0dl83,"Arrow"
|
| 461 |
+
459,/m/07rqsjt,"Whoosh, swoosh, swish"
|
| 462 |
+
460,/m/07qnq_y,"Thump, thud"
|
| 463 |
+
461,/m/07rrh0c,"Thunk"
|
| 464 |
+
462,/m/0b_fwt,"Electronic tuner"
|
| 465 |
+
463,/m/02rr_,"Effects unit"
|
| 466 |
+
464,/m/07m2kt,"Chorus effect"
|
| 467 |
+
465,/m/018w8,"Basketball bounce"
|
| 468 |
+
466,/m/07pws3f,"Bang"
|
| 469 |
+
467,/m/07ryjzk,"Slap, smack"
|
| 470 |
+
468,/m/07rdhzs,"Whack, thwack"
|
| 471 |
+
469,/m/07pjjrj,"Smash, crash"
|
| 472 |
+
470,/m/07pc8lb,"Breaking"
|
| 473 |
+
471,/m/07pqn27,"Bouncing"
|
| 474 |
+
472,/m/07rbp7_,"Whip"
|
| 475 |
+
473,/m/07pyf11,"Flap"
|
| 476 |
+
474,/m/07qb_dv,"Scratch"
|
| 477 |
+
475,/m/07qv4k0,"Scrape"
|
| 478 |
+
476,/m/07pdjhy,"Rub"
|
| 479 |
+
477,/m/07s8j8t,"Roll"
|
| 480 |
+
478,/m/07plct2,"Crushing"
|
| 481 |
+
479,/t/dd00112,"Crumpling, crinkling"
|
| 482 |
+
480,/m/07qcx4z,"Tearing"
|
| 483 |
+
481,/m/02fs_r,"Beep, bleep"
|
| 484 |
+
482,/m/07qwdck,"Ping"
|
| 485 |
+
483,/m/07phxs1,"Ding"
|
| 486 |
+
484,/m/07rv4dm,"Clang"
|
| 487 |
+
485,/m/07s02z0,"Squeal"
|
| 488 |
+
486,/m/07qh7jl,"Creak"
|
| 489 |
+
487,/m/07qwyj0,"Rustle"
|
| 490 |
+
488,/m/07s34ls,"Whir"
|
| 491 |
+
489,/m/07qmpdm,"Clatter"
|
| 492 |
+
490,/m/07p9k1k,"Sizzle"
|
| 493 |
+
491,/m/07qc9xj,"Clicking"
|
| 494 |
+
492,/m/07rwm0c,"Clickety-clack"
|
| 495 |
+
493,/m/07phhsh,"Rumble"
|
| 496 |
+
494,/m/07qyrcz,"Plop"
|
| 497 |
+
495,/m/07qfgpx,"Jingle, tinkle"
|
| 498 |
+
496,/m/07rcgpl,"Hum"
|
| 499 |
+
497,/m/07p78v5,"Zing"
|
| 500 |
+
498,/t/dd00121,"Boing"
|
| 501 |
+
499,/m/07s12q4,"Crunch"
|
| 502 |
+
500,/m/028v0c,"Silence"
|
| 503 |
+
501,/m/01v_m0,"Sine wave"
|
| 504 |
+
502,/m/0b9m1,"Harmonic"
|
| 505 |
+
503,/m/0hdsk,"Chirp tone"
|
| 506 |
+
504,/m/0c1dj,"Sound effect"
|
| 507 |
+
505,/m/07pt_g0,"Pulse"
|
| 508 |
+
506,/t/dd00125,"Inside, small room"
|
| 509 |
+
507,/t/dd00126,"Inside, large room or hall"
|
| 510 |
+
508,/t/dd00127,"Inside, public space"
|
| 511 |
+
509,/t/dd00128,"Outside, urban or manmade"
|
| 512 |
+
510,/t/dd00129,"Outside, rural or natural"
|
| 513 |
+
511,/m/01b9nn,"Reverberation"
|
| 514 |
+
512,/m/01jnbd,"Echo"
|
| 515 |
+
513,/m/096m7z,"Noise"
|
| 516 |
+
514,/m/06_y0by,"Environmental noise"
|
| 517 |
+
515,/m/07rgkc5,"Static"
|
| 518 |
+
516,/m/06xkwv,"Mains hum"
|
| 519 |
+
517,/m/0g12c5,"Distortion"
|
| 520 |
+
518,/m/08p9q4,"Sidetone"
|
| 521 |
+
519,/m/07szfh9,"Cacophony"
|
| 522 |
+
520,/m/0chx_,"White noise"
|
| 523 |
+
521,/m/0cj0r,"Pink noise"
|
| 524 |
+
522,/m/07p_0gm,"Throbbing"
|
| 525 |
+
523,/m/01jwx6,"Vibration"
|
| 526 |
+
524,/m/07c52,"Television"
|
| 527 |
+
525,/m/06bz3,"Radio"
|
| 528 |
+
526,/m/07hvw1,"Field recording"
|
RAG/Knowledge_Database/languagebind_main/a_cls/dataloader.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# @Time : 6/19/21 12:23 AM
|
| 3 |
+
# @Author : Yuan Gong
|
| 4 |
+
# @Affiliation : Massachusetts Institute of Technology
|
| 5 |
+
# @Email : yuangong@mit.edu
|
| 6 |
+
# @File : dataloader.py
|
| 7 |
+
|
| 8 |
+
# modified from:
|
| 9 |
+
# Author: David Harwath
|
| 10 |
+
# with some functions borrowed from https://github.com/SeanNaren/deepspeech.pytorch
|
| 11 |
+
|
| 12 |
+
import csv
|
| 13 |
+
import json
|
| 14 |
+
import logging
|
| 15 |
+
|
| 16 |
+
import torchaudio
|
| 17 |
+
import numpy as np
|
| 18 |
+
import torch
|
| 19 |
+
import torch.nn.functional
|
| 20 |
+
from torch.utils.data import Dataset
|
| 21 |
+
import random
|
| 22 |
+
|
| 23 |
+
def make_midname_dict(label_csv):
    """Map each AudioSet machine id ("mid", e.g. "/m/09x0r") to its
    human-readable display name.

    :param label_csv: path to a CSV with at least 'mid' and 'display_name' columns.
    :returns: dict mapping mid -> display_name.
    """
    # Dict comprehension replaces the manual loop; the old `line_count`
    # accumulator was never used and has been dropped.
    with open(label_csv, 'r') as f:
        return {row['mid']: row['display_name'] for row in csv.DictReader(f)}
|
| 32 |
+
|
| 33 |
+
def make_index_dict(label_csv):
    """Map each AudioSet machine id ("mid") to its class index.

    The index is kept as a string, exactly as read from the CSV; callers
    convert with int() where needed.

    :param label_csv: path to a CSV with at least 'mid' and 'index' columns.
    :returns: dict mapping mid -> index string.
    """
    # Dict comprehension replaces the manual loop; the old `line_count`
    # accumulator was never used and has been dropped.
    with open(label_csv, 'r') as f:
        return {row['mid']: row['index'] for row in csv.DictReader(f)}
|
| 42 |
+
|
| 43 |
+
def make_name_dict(label_csv):
    """Map each class index (as a string, exactly as stored in the CSV)
    to its human-readable display name.

    :param label_csv: path to a CSV with at least 'index' and 'display_name' columns.
    :returns: dict mapping index string -> display_name.
    """
    # Dict comprehension replaces the manual loop; the old `line_count`
    # accumulator was never used and has been dropped.
    with open(label_csv, 'r') as f:
        return {row['index']: row['display_name'] for row in csv.DictReader(f)}
|
| 52 |
+
|
| 53 |
+
def lookup_list(index_list, label_csv):
    """Translate a list of class-index strings into display names.

    :param index_list: iterable of index strings (keys of the name table).
    :param label_csv: path to the label CSV used to build the lookup table.
    :returns: list of display names, one per entry of ``index_list``.
    """
    table = make_name_dict(label_csv)
    return [table[idx] for idx in index_list]
|
| 59 |
+
|
| 60 |
+
def preemphasis(signal, coeff=0.97):
    """Apply a pre-emphasis filter to the input signal.

    The first sample is passed through unchanged; each later sample has
    ``coeff`` times its predecessor subtracted from it.

    :param signal: The signal to filter.
    :param coeff: The preemphasis coefficient. 0 is none, default 0.97.
    :returns: the filtered signal.
    """
    emphasized_tail = signal[1:] - coeff * signal[:-1]
    return np.append(signal[0], emphasized_tail)
|
| 68 |
+
|
| 69 |
+
class AudiosetDataset(Dataset):
    def __init__(self, dataset_json_file, audio_conf, label_csv=None):
        """
        Dataset that manages audio recordings
        :param audio_conf: Dictionary containing the audio loading and preprocessing settings
        :param dataset_json_file
        """
        # NOTE(review): audio_conf is accepted but never stored or read in this
        # file — presumably consumed by a fuller version of this class; confirm.
        self.datapath = dataset_json_file
        with open(dataset_json_file, 'r') as fp:
            data_json = json.load(fp)
        # The manifest JSON has a top-level 'data' list; each entry carries at
        # least 'wav' (audio path) and 'labels' (comma-separated mid strings).
        self.data = data_json['data']
        # mid (e.g. "/m/09x0r") -> class index as a string, from the label CSV.
        self.index_dict = make_index_dict(label_csv)
        self.label_num = len(self.index_dict)

    def __getitem__(self, index):
        # Returns (fbank, label_indices): features plus a multi-hot label vector.
        datum = self.data[index]
        label_indices = np.zeros(self.label_num)
        try:
            # NOTE(review): _wav2fbank is not defined anywhere in this file; it
            # must be supplied elsewhere (e.g. a subclass), otherwise every call
            # raises AttributeError and this retries random indices forever.
            fbank, mix_lambda = self._wav2fbank(datum['wav'])
        except Exception as e:
            logging.warning(f"Error at {datum['wav']} with \"{e}\"")
            # On failure, fall back to a different, randomly chosen sample.
            return self.__getitem__(random.randint(0, self.__len__()-1))
        # Build the multi-hot target: set 1.0 for every label present on the clip.
        for label_str in datum['labels'].split(','):
            label_indices[int(self.index_dict[label_str])] = 1.0

        label_indices = torch.FloatTensor(label_indices)


        return fbank, label_indices

    def __len__(self):
        return len(self.data)
|
RAG/Knowledge_Database/languagebind_main/a_cls/datasets.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
from data.build_datasets import DataInfo
|
| 6 |
+
from data.process_audio import get_audio_transform, torchaudio_loader
|
| 7 |
+
from torchvision import datasets
|
| 8 |
+
|
| 9 |
+
# -*- coding: utf-8 -*-
|
| 10 |
+
# @Time : 6/19/21 12:23 AM
|
| 11 |
+
# @Author : Yuan Gong
|
| 12 |
+
# @Affiliation : Massachusetts Institute of Technology
|
| 13 |
+
# @Email : yuangong@mit.edu
|
| 14 |
+
# @File : dataloader.py
|
| 15 |
+
|
| 16 |
+
# modified from:
|
| 17 |
+
# Author: David Harwath
|
| 18 |
+
# with some functions borrowed from https://github.com/SeanNaren/deepspeech.pytorch
|
| 19 |
+
|
| 20 |
+
import csv
|
| 21 |
+
import json
|
| 22 |
+
import logging
|
| 23 |
+
|
| 24 |
+
import torchaudio
|
| 25 |
+
import numpy as np
|
| 26 |
+
import torch
|
| 27 |
+
import torch.nn.functional
|
| 28 |
+
from torch.utils.data import Dataset
|
| 29 |
+
import random
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def make_index_dict(label_csv):
    """Map each AudioSet machine id ("mid") to its class index.

    The index is kept as a string, exactly as read from the CSV; callers
    convert with int() where needed.

    :param label_csv: path to a CSV with at least 'mid' and 'index' columns.
    :returns: dict mapping mid -> index string.
    """
    # Dict comprehension replaces the manual loop; the old `line_count`
    # accumulator was never used and has been dropped.
    with open(label_csv, 'r') as f:
        return {row['mid']: row['index'] for row in csv.DictReader(f)}
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class AudiosetDataset(Dataset):
    """AudioSet evaluation dataset: yields (transformed_audio, multi-hot labels).

    The dataset locations were previously hard-coded cluster paths; they are now
    keyword parameters defaulting to those same paths, so existing callers are
    unaffected while other environments can point at their own copies.
    """

    def __init__(self, args, transform, loader,
                 audio_root='/apdcephfs_cq3/share_1311970/downstream_datasets/Audio/audioset/eval_segments',
                 dataset_json_file='/apdcephfs_cq3/share_1311970/downstream_datasets/Audio/audioset/filter_eval.json',
                 label_csv='/apdcephfs_cq3/share_1311970/downstream_datasets/Audio/audioset/class_labels_indices.csv'):
        """
        :param args: run configuration (stored, not read by this class itself).
        :param transform: callable applied to each loaded audio clip.
        :param loader: callable mapping an audio file path to a raw audio object.
        :param audio_root: directory holding the evaluation audio files.
        :param dataset_json_file: manifest JSON with a top-level 'data' list;
            each entry carries 'wav' (filename) and 'labels' (comma-separated mids).
        :param label_csv: CSV mapping mid -> class index.
        """
        self.audio_root = audio_root
        with open(dataset_json_file, 'r') as fp:
            data_json = json.load(fp)
        self.data = data_json['data']
        # mid (e.g. "/m/09x0r") -> class index as a string, from the label CSV.
        self.index_dict = make_index_dict(label_csv)
        self.label_num = len(self.index_dict)

        self.args = args
        self.transform = transform
        self.loader = loader

    def __getitem__(self, index):
        datum = self.data[index]
        # Multi-hot target: 1.0 for every label present on the clip.
        label_indices = np.zeros(self.label_num)
        for label_str in datum['labels'].split(','):
            label_indices[int(self.index_dict[label_str])] = 1.0
        label_indices = torch.FloatTensor(label_indices)

        audio = self.loader(os.path.join(self.audio_root, datum['wav']))
        audio_data = self.transform(audio)
        return audio_data, label_indices

    def __len__(self):
        return len(self.data)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def is_valid_file(path):
    """Accept every path unconditionally.

    Passed to ``datasets.ImageFolder`` below so the folder scan does not
    filter files by image extension (the "images" here are audio files).
    """
    return True
|
| 76 |
+
|
| 77 |
+
def get_audio_dataset(args):
    """Build the zero-shot audio-classification eval dataloader described by args."""
    data_path = args.audio_data_path
    transform = get_audio_transform(args)

    if args.val_a_cls_data.lower() == 'audioset':
        # AudioSet is multi-label and ships its own JSON manifest.
        dataset = AudiosetDataset(args, transform=transform, loader=torchaudio_loader)
    else:
        # Other datasets use a one-folder-per-class layout on disk.
        dataset = datasets.ImageFolder(data_path, transform=transform,
                                       loader=torchaudio_loader, is_valid_file=is_valid_file)

    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.workers,
        sampler=None,
    )

    return DataInfo(dataloader=loader, sampler=None)
|
RAG/Knowledge_Database/languagebind_main/a_cls/filter_eval_audio.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""One-off script: keep only AudioSet eval entries whose audio file exists.

Reads the original eval manifest, prefixes each wav name with 'Y' (the naming
convention of the downloaded segment files), drops entries whose file is
missing plus one explicitly excluded clip, and writes the filtered manifest.
"""
import json
import os.path

from tqdm import tqdm

with open(r"G:\audioset\audioset\zip_audios\16k\eval.json", 'r') as f:
    data = json.load(f)['data']

new_data = []
total = 0
success = 0
for i in tqdm(data):
    total += 1
    video_id = os.path.basename(i['wav'])
    # Downloaded segment files are named 'Y<original wav name>'.
    new_video_id = 'Y' + video_id
    i['wav'] = new_video_id
    # 'mW3S0u8bj58' is excluded by prefix match; presumably a bad clip — confirm.
    if os.path.exists(f"G:/audioset/audioset/zip_audios/eval_segments/{i['wav']}") and not video_id.startswith('mW3S0u8bj58'):
        new_data.append(i)
        success += 1
print(total, success, total - success)
with open(r"G:\audioset\audioset\zip_audios\16k\filter_eval.json", 'w') as f:
    # Fix: json.dump returns None; the original rebound `data` to it for no reason.
    json.dump({'data': new_data}, f, indent=2)
|
RAG/Knowledge_Database/languagebind_main/a_cls/precision.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from contextlib import suppress
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def get_autocast(precision):
    """Return a context-manager factory matching the requested precision string.

    'amp' -> CUDA autocast; 'amp_bfloat16'/'amp_bf16' -> bfloat16 autocast
    (more stable than fp16 for CLIP training); anything else -> no-op.
    """
    if precision == 'amp':
        return torch.cuda.amp.autocast
    if precision in ('amp_bfloat16', 'amp_bf16'):
        return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)
    return suppress
|
RAG/Knowledge_Database/languagebind_main/a_cls/stats.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from scipy import stats
|
| 3 |
+
from sklearn import metrics
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
def d_prime(auc):
    """Convert an AUC value to the d-prime sensitivity index.

    d' = sqrt(2) * z(AUC), where z is the standard-normal inverse CDF.
    """
    z_score = stats.norm().ppf(auc)
    return z_score * np.sqrt(2.0)
|
| 10 |
+
|
| 11 |
+
def calculate_stats(output, target):
    """Calculate statistics including mAP, AUC, etc.

    Args:
        output: 2d array, (samples_num, classes_num) of scores.
        target: 2d array, (samples_num, classes_num) of 0/1 labels.

    Returns:
        List with one statistics dict per class.
    """
    classes_num = target.shape[-1]
    per_class_stats = []  # renamed from `stats`, which shadowed the scipy module

    # Accuracy, only used for single-label classification such as esc-50,
    # not for multi-label data such as AudioSet.
    acc = metrics.accuracy_score(np.argmax(target, 1), np.argmax(output, 1))

    # Class-wise statistics
    for k in range(classes_num):
        # Average precision
        avg_precision = metrics.average_precision_score(
            target[:, k], output[:, k], average=None)

        # AUC
        auc = metrics.roc_auc_score(target[:, k], output[:, k], average=None)

        # Precisions, recalls
        (precisions, recalls, thresholds) = metrics.precision_recall_curve(
            target[:, k], output[:, k])

        # FPR, TPR
        (fpr, tpr, thresholds) = metrics.roc_curve(target[:, k], output[:, k])

        save_every_steps = 1000  # Subsample curve points to reduce saved size
        # renamed from `dict`, which shadowed the builtin
        class_stats = {'precisions': precisions[0::save_every_steps],
                       'recalls': recalls[0::save_every_steps],
                       'AP': avg_precision,
                       'fpr': fpr[0::save_every_steps],
                       'fnr': 1. - tpr[0::save_every_steps],
                       'auc': auc,
                       # note acc is not class-wise; duplicated per class to
                       # keep the entry shape consistent with other metrics
                       'acc': acc
                       }
        per_class_stats.append(class_stats)

    return per_class_stats
|
RAG/Knowledge_Database/languagebind_main/a_cls/util.py
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import pickle
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import random
|
| 7 |
+
from collections import namedtuple
|
| 8 |
+
|
| 9 |
+
def calc_recalls(S):
    """
    Computes recall at 1, 5, and 10 given a similarity matrix S.
    By convention, rows of S are assumed to correspond to images and columns are captions.
    """
    assert(S.dim() == 2)
    assert(S.size(0) == S.size(1))
    if isinstance(S, torch.autograd.Variable):
        S = S.data
    n = S.size(0)
    _, A2I_ind = S.topk(10, 0)   # per-caption top-10 image indices
    _, I2A_ind = S.topk(10, 1)   # per-image top-10 caption indices
    meters = {name: AverageMeter()
              for name in ('A_r1', 'A_r5', 'A_r10', 'I_r1', 'I_r5', 'I_r10')}
    for i in range(n):
        A_rank = -1
        I_rank = -1
        # Find where the matching pair lands in each direction's top-10.
        for rank in range(10):
            if A2I_ind[rank, i] == i:
                I_rank = rank
            if I2A_ind[i, rank] == i:
                A_rank = rank
        # Record a hit (1) or miss (0) for each cutoff in both directions.
        for k in (1, 5, 10):
            meters['A_r%d' % k].update(1 if 0 <= A_rank < k else 0)
            meters['I_r%d' % k].update(1 if 0 <= I_rank < k else 0)

    return {name: meter.avg for name, meter in meters.items()}
|
| 68 |
+
|
| 69 |
+
def computeMatchmap(I, A):
    """Third-order similarity tensor between image map I (D, H, W) and audio A (D, T).

    Entry (h, w, t) is the dot product of the image feature at (h, w)
    with the audio feature at time t.
    """
    assert(I.dim() == 3)
    assert(A.dim() == 2)
    D, H, W = I.size(0), I.size(1), I.size(2)
    pixels = I.view(D, -1).t()        # (H*W, D)
    matchmap = torch.mm(pixels, A)    # (H*W, T)
    return matchmap.view(H, W, A.size(1))
|
| 80 |
+
|
| 81 |
+
def matchmapSim(M, simtype):
    """Pool a matchmap (H, W, T) down to a scalar similarity.

    SISA: mean over all cells; MISA: max over space then mean over time;
    SIMA: max over time then mean over space. Raises ValueError otherwise.
    """
    assert(M.dim() == 3)
    if simtype == 'SISA':
        return M.mean()
    if simtype == 'MISA':
        spatial_max = M.max(0)[0].max(0)[0]
        return spatial_max.mean()
    if simtype == 'SIMA':
        temporal_max = M.max(2)[0]
        return temporal_max.mean()
    raise ValueError
|
| 94 |
+
|
| 95 |
+
def sampled_margin_rank_loss(image_outputs, audio_outputs, nframes, margin=1., simtype='MISA'):
    """
    Computes the triplet margin ranking loss for each anchor image/caption pair.
    The impostor image/caption is randomly sampled from the minibatch.

    image_outputs: (batch, D, H, W) feature maps (dim asserted below).
    audio_outputs: (batch, D, T) feature sequences (dim asserted below).
    nframes: per-clip count of valid audio frames.
    Returns a 1-element tensor: the mean hinge loss over the batch.
    """
    assert(image_outputs.dim() == 4)
    assert(audio_outputs.dim() == 3)
    n = image_outputs.size(0)
    loss = torch.zeros(1, device=image_outputs.device, requires_grad=True)
    for i in range(n):
        # Re-draw until the impostor indices differ from the anchor index.
        # NOTE(review): infinite loop if n == 1 — assumes batch size > 1.
        I_imp_ind = i
        A_imp_ind = i
        while I_imp_ind == i:
            I_imp_ind = np.random.randint(0, n)
        while A_imp_ind == i:
            A_imp_ind = np.random.randint(0, n)
        nF = nframes[i]
        nFimp = nframes[A_imp_ind]
        # Similarities: anchor pair, impostor image, impostor audio.
        anchorsim = matchmapSim(computeMatchmap(image_outputs[i], audio_outputs[i][:, 0:nF]), simtype)
        Iimpsim = matchmapSim(computeMatchmap(image_outputs[I_imp_ind], audio_outputs[i][:, 0:nF]), simtype)
        Aimpsim = matchmapSim(computeMatchmap(image_outputs[i], audio_outputs[A_imp_ind][:, 0:nFimp]), simtype)
        # Hinge in each direction: only accumulate when the margin is violated.
        A2I_simdif = margin + Iimpsim - anchorsim
        if (A2I_simdif.data > 0).all():
            loss = loss + A2I_simdif
        I2A_simdif = margin + Aimpsim - anchorsim
        if (I2A_simdif.data > 0).all():
            loss = loss + I2A_simdif
    loss = loss / n
    return loss
|
| 124 |
+
|
| 125 |
+
def compute_matchmap_similarity_matrix(image_outputs, audio_outputs, nframes, simtype='MISA'):
    """
    Assumes image_outputs is a (batchsize, embedding_dim, rows, height) tensor.
    Assumes audio_outputs is a (batchsize, embedding_dim, time) tensor.
    Returns similarity matrix S where images are rows and audios are along the columns;
    each audio is truncated to its valid length max(1, nframes[j]).
    """
    assert(image_outputs.dim() == 4)
    assert(audio_outputs.dim() == 3)
    batch = image_outputs.size(0)
    S = torch.zeros(batch, batch, device=image_outputs.device)
    for row in range(batch):
        for col in range(batch):
            valid = max(1, nframes[col])
            matchmap = computeMatchmap(image_outputs[row], audio_outputs[col][:, 0:valid])
            S[row, col] = matchmapSim(matchmap, simtype)
    return S
|
| 140 |
+
|
| 141 |
+
def compute_pooldot_similarity_matrix(image_outputs, audio_outputs, nframes):
    """
    Assumes image_outputs is a (batchsize, embedding_dim, rows, height) tensor.
    Assumes audio_outputs is a (batchsize, embedding_dim, 1, time) tensor.
    Returns similarity matrix S where images are rows and audios are along the columns.
    S[i][j] is the dot product between the mean-pooled embeddings of the ith
    image output and the jth audio output (audio pooled over its valid frames).
    """
    assert(image_outputs.dim() == 4)
    assert(audio_outputs.dim() == 4)
    batch = image_outputs.size(0)
    pool = nn.AdaptiveAvgPool2d((1, 1))
    image_embs = pool(image_outputs).squeeze(3).squeeze(2)
    # Pool each audio clip only over its first max(1, nframes[j]) frames.
    audio_chunks = [pool(audio_outputs[j][:, :, 0:max(1, nframes[j])]).unsqueeze(0)
                    for j in range(batch)]
    audio_embs = torch.cat(audio_chunks).squeeze(3).squeeze(2)
    return torch.mm(image_embs, audio_embs.t())
|
| 162 |
+
|
| 163 |
+
def one_imposter_index(i, N):
    """Uniformly sample an index in [0, N-1] that is guaranteed to differ from i.

    Draws from [0, N-2] and remaps a collision with i to N-1, so every
    index except i has equal probability.
    """
    candidate = random.randint(0, N - 2)
    return N - 1 if candidate == i else candidate
|
| 168 |
+
|
| 169 |
+
def basic_get_imposter_indices(N):
    """Sample one imposter index (guaranteed != its own position) per anchor."""
    return [one_imposter_index(anchor, N) for anchor in range(N)]
|
| 176 |
+
|
| 177 |
+
def semihardneg_triplet_loss_from_S(S, margin):
    """
    Input: Similarity matrix S as an autograd.Variable
    Output: The one-way triplet loss from rows of S to columns of S. Impostors are taken
    to be the most similar point to the anchor that is still less similar to the anchor
    than the positive example; rows with no such point fall back to a random impostor.
    You would need to run this function twice, once with S and once with S.t(),
    in order to compute the triplet loss in both directions.
    """
    assert(S.dim() == 2)
    assert(S.size(0) == S.size(1))
    N = S.size(0)
    # Legacy torch.autograd.Variable API, kept as-is for fidelity.
    loss = torch.autograd.Variable(torch.zeros(1).type(S.data.type()), requires_grad=True)
    # Imposter - ground truth: row i holds S[i, :] - S[i, i].
    Sdiff = S - torch.diag(S).view(-1, 1)
    eps = 1e-12
    # All examples strictly less similar than ground truth
    mask = (Sdiff < -eps).type(torch.LongTensor)
    maskf = mask.type_as(S)
    # Mask out all examples >= gt by replacing them with the global minimum
    Sp = maskf * Sdiff + (1 - maskf) * torch.min(Sdiff).detach()
    # Find the index of the most similar remaining (semi-hard) example per row
    _, idc = Sp.max(dim=1)
    idc = idc.data.cpu()
    # Vector mask: 1 iff there exists an example < gt in that row
    has_neg = (mask.sum(dim=1) > 0).data.type(torch.LongTensor)
    # Random imposter indices used as the fallback
    random_imp_ind = torch.LongTensor(basic_get_imposter_indices(N))
    # Use hardneg if there exists an example < gt, otherwise use random imposter
    imp_idc = has_neg * idc + (1 - has_neg) * random_imp_ind
    # This could probably be vectorized too, but I haven't.
    for i, imp in enumerate(imp_idc):
        # Hinge: accumulate only margin violations.
        local_loss = Sdiff[i, imp] + margin
        if (local_loss.data > 0).all():
            loss = loss + local_loss
    loss = loss / N
    return loss
|
| 214 |
+
|
| 215 |
+
def sampled_triplet_loss_from_S(S, margin):
    """
    Input: Similarity matrix S as an autograd.Variable
    Output: The one-way triplet loss from rows of S to columns of S. Imposters are
    randomly sampled from the columns of S.
    You would need to run this function twice, once with S and once with S.t(),
    in order to compute the triplet loss in both directions.
    """
    assert(S.dim() == 2)
    assert(S.size(0) == S.size(1))
    N = S.size(0)
    # Legacy torch.autograd.Variable API, kept as-is for fidelity.
    loss = torch.autograd.Variable(torch.zeros(1).type(S.data.type()), requires_grad=True)
    # Imposter - ground truth: row i holds S[i, :] - S[i, i].
    Sdiff = S - torch.diag(S).view(-1, 1)
    imp_ind = torch.LongTensor(basic_get_imposter_indices(N))
    # This could probably be vectorized too, but I haven't.
    for i, imp in enumerate(imp_ind):
        # Hinge: accumulate only margin violations.
        local_loss = Sdiff[i, imp] + margin
        if (local_loss.data > 0).all():
            loss = loss + local_loss
    loss = loss / N
    return loss
|
| 237 |
+
|
| 238 |
+
class AverageMeter(object):
    """Computes and stores the average and current value of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        # val: most recent value; avg: running mean;
        # sum: weighted sum; count: total weight seen.
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record `val` observed with weight `n` and refresh the mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
| 254 |
+
|
| 255 |
+
def adjust_learning_rate(base_lr, lr_decay, optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every lr_decay epochs."""
    new_lr = base_lr * (0.1 ** (epoch // lr_decay))
    print('now learning rate changed to {:f}'.format(new_lr))
    for group in optimizer.param_groups:
        group['lr'] = new_lr
|
| 261 |
+
|
| 262 |
+
def adjust_learning_rate2(base_lr, lr_decay, optimizer, epoch):
    """Multiply every param group's learning rate by 0.1.

    Fix: the original printed `lr` before it was ever assigned, raising
    NameError on the first iteration; print the group's current rate instead.
    base_lr and lr_decay are unused here (kept for signature compatibility).
    """
    lr = None
    for param_group in optimizer.param_groups:
        cur_lr = param_group['lr']
        print('current learing rate is {:f}'.format(cur_lr))  # message kept verbatim
        lr = cur_lr * 0.1
        print('now learning rate changed to {:f}'.format(lr))
    # As in the original, the LAST group's decayed rate is applied to all groups.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def load_progress(prog_pkl, quiet=False):
    """
    Load a training-progress pkl file and return the latest checkpoint info.

    Args:
        prog_pkl(str): path to progress pkl file; holds a list of
            (epoch, global_step, best_epoch, best_avg_r10, time) tuples.
        quiet: suppress the summary printout when True.
    Return:
        (progress list, epoch, global_step, best_epoch, best_avg_r10)
        taken from the last entry.
    """
    with open(prog_pkl, "rb") as f:
        prog = pickle.load(f)
    epoch, global_step, best_epoch, best_avg_r10, _ = prog[-1]

    if not quiet:
        print("\nPrevious Progress:")
        print("[%5s %7s %5s %7s %6s]" % ("epoch", "step", "best_epoch", "best_avg_r10", "time"))
    return prog, epoch, global_step, best_epoch, best_avg_r10
|
| 297 |
+
|
| 298 |
+
def count_parameters(model):
    """Number of trainable (requires_grad) parameters in `model`."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
| 300 |
+
|
| 301 |
+
# Configuration records for network components.
# NOTE(review): field semantics inferred from names only — confirm at the
# construction sites of these configs.
PrenetConfig = namedtuple(
    'PrenetConfig', ['input_size', 'hidden_size', 'num_layers', 'dropout'])

RNNConfig = namedtuple(
    'RNNConfig',
    ['input_size', 'hidden_size', 'num_layers', 'dropout', 'residual'])
|
RAG/Knowledge_Database/languagebind_main/a_cls/zero_shot.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
from torch import nn
|
| 8 |
+
from tqdm import tqdm
|
| 9 |
+
|
| 10 |
+
from open_clip import get_input_dtype, get_tokenizer
|
| 11 |
+
from open_clip.factory import HF_HUB_PREFIX
|
| 12 |
+
from .precision import get_autocast
|
| 13 |
+
from .stats import calculate_stats, d_prime
|
| 14 |
+
from .zero_shot_classifier import build_zero_shot_classifier
|
| 15 |
+
from .zero_shot_metadata import CLASSNAMES, OPENAI_IMAGENET_TEMPLATES
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def accuracy(output, target, topk=(1,)):
    """Count of correctly classified samples for each requested top-k cutoff."""
    max_k = max(topk)
    # pred: (max_k, batch) — row r holds each sample's rank-r predicted class.
    pred = output.topk(max_k, 1, True, True)[1].t()
    hits = pred.eq(target.view(1, -1).expand_as(pred))
    return [float(hits[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy()) for k in topk]
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def run(model, classifier, dataloader, args):
    """Zero-shot eval loop: returns (top1, top5) correct counts divided by N."""
    autocast = get_autocast(args.precision)
    input_dtype = get_input_dtype(args.precision)

    with torch.no_grad():
        top1 = top5 = seen = 0.
        for images, target in tqdm(dataloader, unit_scale=args.batch_size):
            images = images.to(device=args.device, dtype=input_dtype)
            # unsqueeze(2) inserts a singleton axis; presumably the frame/depth
            # axis the model's image tower expects — confirm against the model.
            images = images.unsqueeze(2)
            target = target.to(args.device)

            with autocast():
                # predict
                output = model(image=images)
                image_features = output['image_features'] if isinstance(output, dict) else output[0]
                logits = 100. * image_features @ classifier

            # measure accuracy
            acc1, acc5 = accuracy(logits, target, topk=(1, 5))
            top1 += acc1
            top5 += acc5
            seen += images.size(0)

    return top1 / seen, top5 / seen
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def validate(audio_model, classifier, val_loader, args, epoch):
    """Multi-label zero-shot evaluation (AudioSet-style).

    Runs the model over val_loader, scores logits against the zero-shot
    `classifier`, computes per-class statistics via calculate_stats, saves
    the raw predictions to disk, and (when training) also scores the
    cross-epoch prediction ensemble. Returns a dict of metric name -> float.
    """
    epoch = epoch - 1 ########################
    # switch to evaluate mode
    audio_model.eval()
    autocast = get_autocast(args.precision)
    input_dtype = get_input_dtype(args.precision)
    A_predictions = []  # per-batch logits, moved to CPU
    A_targets = []      # per-batch multi-hot label tensors
    A_loss = []         # per-batch scalar losses
    with torch.no_grad():
        for i, (audio_input, labels) in enumerate(tqdm(val_loader)):
            audio_input = audio_input.to(device=args.device, dtype=input_dtype)

            # compute output
            with autocast():
                # predict: audio is fed through the model's image tower
                output = audio_model(image=audio_input)
                image_features = output['image_features'] if isinstance(output, dict) else output[0]
                logits = 100. * image_features @ classifier
                audio_output = logits

            # audio_output = torch.sigmoid(audio_output)
            predictions = audio_output.to('cpu').detach()

            A_predictions.append(predictions)
            A_targets.append(labels)

            # compute the loss against the argmax of the multi-hot labels
            # (a single-label proxy for the multi-label target)
            labels = labels.to(args.device)
            loss = nn.CrossEntropyLoss()(audio_output, torch.argmax(labels.long(), dim=1))
            A_loss.append(loss.to('cpu').detach())

    audio_output = torch.cat(A_predictions)
    target = torch.cat(A_targets)
    loss = np.mean(A_loss)
    stats = calculate_stats(audio_output, target)

    # save the prediction here so validate_ensemble can average across epochs
    args.a_cls_output_dir = os.path.join(args.log_base_path, f'a_cls/{args.val_a_cls_data.lower()}')
    os.makedirs(args.a_cls_output_dir, exist_ok=True)
    if os.path.exists(args.a_cls_output_dir + '/predictions') == False:
        os.mkdir(args.a_cls_output_dir + '/predictions')
    np.savetxt(args.a_cls_output_dir + '/predictions/target.csv', target, delimiter=',')
    np.savetxt(args.a_cls_output_dir + '/predictions/predictions_' + str(epoch) + '.csv', audio_output,
               delimiter=',')

    valid_loss = loss
    main_metrics = 'mAP'  # AudioSet is multi-label, so mAP is the headline metric
    metrics = {}

    if args.do_train:
        # ensemble results across the epochs evaluated so far
        cum_stats = validate_ensemble(args, epoch)
        cum_mAP = np.mean([stat['AP'] for stat in cum_stats])
        cum_mAUC = np.mean([stat['auc'] for stat in cum_stats])
        cum_acc = cum_stats[0]['acc']

    mAP = np.mean([stat['AP'] for stat in stats])
    mAUC = np.mean([stat['auc'] for stat in stats])
    acc = stats[0]['acc']

    # middle point of each (subsampled) PR curve as a summary precision/recall
    middle_ps = [stat['precisions'][int(len(stat['precisions']) / 2)] for stat in stats]
    middle_rs = [stat['recalls'][int(len(stat['recalls']) / 2)] for stat in stats]
    average_precision = np.mean(middle_ps)
    average_recall = np.mean(middle_rs)

    if main_metrics == 'mAP':
        logging.info("mAP: {:.6f}".format(mAP))
    else:
        logging.info("acc: {:.6f}".format(acc))
    logging.info("AUC: {:.6f}".format(mAUC))
    logging.info("Avg Precision: {:.6f}".format(average_precision))
    logging.info("Avg Recall: {:.6f}".format(average_recall))
    logging.info("d_prime: {:.6f}".format(d_prime(mAUC)))
    logging.info("valid_loss: {:.6f}".format(valid_loss))

    if args.do_train:
        logging.info("cum_mAP: {:.6f}".format(cum_mAP))
        logging.info("cum_mAUC: {:.6f}".format(cum_mAUC))

    if main_metrics == 'mAP':
        metrics['mAP'] = float(mAP)
    else:
        metrics['acc'] = float(acc)

    metrics['mAUC'] = float(mAUC)
    metrics['average_precision'] = float(average_precision)
    metrics['average_recall'] = float(average_recall)
    metrics['d_prime_mAUC'] = float(d_prime(mAUC))
    metrics['valid_loss'] = float(valid_loss)

    if args.do_train:
        metrics['cum_mAP'] = float(cum_mAP)
        metrics['cum_mAUC'] = float(cum_mAUC)

    return metrics
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def validate_ensemble(args, epoch):
    """Average the stored per-epoch prediction CSVs and score the ensemble.

    Reads target.csv and this epoch's predictions under
    args.a_cls_output_dir/predictions, folds them into cum_predictions.csv,
    removes the previous epoch's per-epoch file, and returns
    calculate_stats() of the averaged predictions.
    """
    exp_dir = args.a_cls_output_dir
    target = np.loadtxt(exp_dir + '/predictions/target.csv', delimiter=',')
    if epoch == 0:
        # NOTE(review): at epoch 0, predictions_0.csv is loaded here AND again
        # below, so it is counted twice before the /(epoch + 1) divide, and
        # os.remove below targets 'predictions_-1.csv' — confirm callers
        # never reach here with epoch == 0.
        cum_predictions = np.loadtxt(exp_dir + '/predictions/predictions_0.csv', delimiter=',')
    else:
        # cum_predictions.csv stores an average; the (epoch - 1) factor
        # rescales it back to a sum before adding this epoch's predictions.
        # NOTE(review): this assumes the stored mean covers epoch - 1 files,
        # while the divide below uses epoch + 1 — verify the bookkeeping.
        cum_predictions = np.loadtxt(exp_dir + '/predictions/cum_predictions.csv', delimiter=',') * (epoch - 1)
    predictions = np.loadtxt(exp_dir + '/predictions/predictions_' + str(epoch) + '.csv', delimiter=',')
    cum_predictions = cum_predictions + predictions
    # remove the prediction file to save storage space
    os.remove(exp_dir + '/predictions/predictions_' + str(epoch - 1) + '.csv')

    cum_predictions = cum_predictions / (epoch + 1)
    np.savetxt(exp_dir + '/predictions/cum_predictions.csv', cum_predictions, delimiter=',')

    stats = calculate_stats(cum_predictions, target)
    return stats
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def zero_shot_eval(model, data, epoch, args):
    """Run zero-shot audio classification for the single eval dataset in `data`.

    Builds a text classifier from the dataset's class names, then either runs
    the multi-label AudioSet path (validate) or the top-1/top-5 path (run).
    Returns a dict of metrics, or {} when evaluation is skipped this epoch.
    """
    # Temporarily repoint args.val_a_cls_data at the (single) dataset under eval.
    temp_val_a_cls_data = args.val_a_cls_data
    args.val_a_cls_data = list(data.keys())
    assert len(args.val_a_cls_data) == 1
    args.val_a_cls_data = args.val_a_cls_data[0]

    # NOTE(review): the early returns below do NOT restore args.val_a_cls_data
    # to its saved value — confirm this is intended.
    if args.val_a_cls_data not in data:
        return {}
    if args.zeroshot_frequency == 0:
        return {}
    if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs:
        return {}
    if args.distributed and not args.horovod:
        model = model.module

    logging.info(f'Starting zero-shot {args.val_a_cls_data.upper()}.')

    logging.info('Building zero-shot classifier')
    autocast = get_autocast(args.precision)
    with autocast():
        tokenizer = get_tokenizer(HF_HUB_PREFIX+args.model, cache_dir=args.cache_dir)
        # tokenizer = get_tokenizer("ViT-L-14")
        classifier = build_zero_shot_classifier(
            model,
            tokenizer=tokenizer,
            classnames=CLASSNAMES[args.val_a_cls_data],
            templates=OPENAI_IMAGENET_TEMPLATES,
            num_classes_per_batch=10,
            device=args.device,
            use_tqdm=True,
        )

    logging.info('Using classifier')
    results = {}
    if args.val_a_cls_data.lower() == 'audioset':
        # Multi-label dataset: mAP/AUC-style metrics via validate().
        if args.val_a_cls_data in data:
            stats = validate(model, classifier, data[args.val_a_cls_data].dataloader, args, epoch)
            results.update(stats)
    else:
        # Single-label datasets: top-1/top-5 accuracy via run().
        if args.val_a_cls_data in data:
            top1, top5 = run(model, classifier, data[args.val_a_cls_data].dataloader, args)
            results[f'{args.val_a_cls_data}-zeroshot-val-top1'] = top1
            results[f'{args.val_a_cls_data}-zeroshot-val-top5'] = top5

    logging.info(f'Finished zero-shot {args.val_a_cls_data.upper()}.')

    args.val_a_cls_data = temp_val_a_cls_data
    return results
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
|
RAG/Knowledge_Database/languagebind_main/a_cls/zero_shot_classifier.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from functools import partial
|
| 2 |
+
from itertools import islice
|
| 3 |
+
from typing import Callable, List, Optional, Sequence, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def batched(iterable, n):
    """Yield successive lists of up to *n* items drawn from *iterable*.

    The final list may be shorter than *n*. Mirrors the more-itertools
    implementation; Python 3.12 ships this as ``itertools.batched``.
    """
    source = iter(iterable)
    chunk = list(islice(source, n))
    while chunk:
        yield chunk
        chunk = list(islice(source, n))
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def build_zero_shot_classifier(
        model,
        tokenizer,
        classnames: Sequence[str],
        templates: Sequence[Union[Callable, str]],
        num_classes_per_batch: Optional[int] = 10,
        device: Union[str, torch.device] = 'cpu',
        use_tqdm: bool = False,
):
    """Build zero-shot classifier weights, processing class names in batches.

    Args:
        model: CLIP-style model exposing ``encode_text(input_ids, attention_mask)``.
        tokenizer: Callable mapping a list of prompts to (input_ids, attention_mask).
        classnames: Sequence of class (label) names.
        templates: Sequence of callables or ``str.format``-style strings that turn
            a class name into a prompt.
        num_classes_per_batch: How many classes to embed per forward pass; all at
            once when ``None``.
        device: Device the text tensors are moved to.
        use_tqdm: Show a TQDM progress bar over class batches.

    Returns:
        Tensor of shape (embed_dim, num_classes) with L2-normalized columns.
    """
    assert isinstance(templates, Sequence) and len(templates) > 0
    assert isinstance(classnames, Sequence) and len(classnames) > 0
    use_format = isinstance(templates[0], str)
    num_templates = len(templates)
    num_classes = len(classnames)
    if use_tqdm:
        import tqdm
        # Ceil-divide so a partial final batch still counts as one iteration.
        num_iter = 1 if num_classes_per_batch is None else ((num_classes - 1) // num_classes_per_batch + 1)
        iter_wrap = partial(tqdm.tqdm, total=num_iter, unit_scale=num_classes_per_batch)
    else:
        iter_wrap = iter

    def _embed_classes(names):
        # Expand every class name through every template (class-major order).
        prompts = []
        for name in names:
            for template in templates:
                prompts.append(template.format(name) if use_format else template(name))
        input_ids, attention_mask = tokenizer(prompts)
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        embeds = F.normalize(model.encode_text(input_ids, attention_mask), dim=-1)
        # Average the per-template embeddings of each class, then re-normalize.
        embeds = embeds.reshape(len(names), num_templates, -1).mean(dim=1)
        embeds = embeds / embeds.norm(dim=1, keepdim=True)
        return embeds.T

    with torch.no_grad():
        if num_classes_per_batch:
            chunks = iter_wrap(batched(classnames, num_classes_per_batch))
            zeroshot_weights = torch.cat([_embed_classes(chunk) for chunk in chunks], dim=1)
        else:
            zeroshot_weights = _embed_classes(classnames)
    return zeroshot_weights
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def build_zero_shot_classifier_legacy(
        model,
        tokenizer,
        classnames: Sequence[str],
        templates: Sequence[Union[Callable, str]],
        device: Union[str, torch.device] = 'cpu',
        use_tqdm: bool = False,
):
    """Build zero-shot classifier weights, embedding class names one at a time.

    Args:
        model: CLIP-style model exposing ``encode_text(tokens)``.
        tokenizer: Callable mapping a list of prompts to a token tensor.
        classnames: Sequence of class (label) names.
        templates: Sequence of callables or ``str.format``-style strings that turn
            a class name into a prompt.
        device: Device the token tensor and final weights are moved to.
        use_tqdm: Show a TQDM progress bar over class names.

    Returns:
        Tensor of shape (embed_dim, num_classes) with L2-normalized columns.
    """
    assert isinstance(templates, Sequence) and len(templates) > 0
    assert isinstance(classnames, Sequence) and len(classnames) > 0
    if use_tqdm:
        import tqdm
        iter_wrap = tqdm.tqdm
    else:
        iter_wrap = iter

    use_format = isinstance(templates[0], str)

    with torch.no_grad():
        per_class = []
        for name in iter_wrap(classnames):
            prompts = [t.format(name) if use_format else t(name) for t in templates]
            tokens = tokenizer(prompts).to(device)  # tokenize
            embeds = model.encode_text(tokens)
            # Mean of the normalized per-template embeddings, re-normalized.
            mean_embed = F.normalize(embeds, dim=-1).mean(dim=0)
            mean_embed /= mean_embed.norm()
            per_class.append(mean_embed)
        zeroshot_weights = torch.stack(per_class, dim=1).to(device)

    return zeroshot_weights
|
| 111 |
+
|
RAG/Knowledge_Database/languagebind_main/a_cls/zero_shot_metadata.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os

import pandas as pd

# Prompt templates that turn an audio class name into a text prompt for
# zero-shot classification. Only the single minimal "a sound of {c}."
# template is active. The original file also carried a large bank of
# commented-out alternative templates (adapted from OpenAI CLIP's
# image-prompt engineering notebook: "a bad sound of a {c}.",
# "a sound of many {c}.", etc.); that dead commented-out list has been
# condensed into this note for readability.
OPENAI_IMAGENET_TEMPLATES = (
    # lambda c: f'This is a sound of {c}.',
    lambda c: f'a sound of {c}.',
)

# a much smaller subset of above prompts
# from https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb
# NOTE(review): these read like image-domain prompts with "photo" swapped for
# "sound" — presumably carried over from the CLIP notebook; confirm intent.
SIMPLE_IMAGENET_TEMPLATES = (
    lambda c: f'itap of a {c}.',
    lambda c: f'a bad sound of the {c}.',
    lambda c: f'a origami {c}.',
    lambda c: f'a sound of the large {c}.',
    lambda c: f'a {c} in a video game.',
    lambda c: f'art of the {c}.',
    lambda c: f'a sound of the small {c}.',
)


# Absolute path to the AudioSet label CSV shipped next to this module.
PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "class_labels_indices.csv")


# Class-name vocabularies for the supported zero-shot audio benchmarks.
# 'Audioset' is read from the CSV above at import time (third column holds
# the display names); 'ESC50' and 'VGGSound' are inlined.
CLASSNAMES = {
    'Audioset': tuple(pd.read_csv(PATH).values[:, 2]),
    'ESC50': (
        'airplane', 'breathing', 'brushing teeth', 'can opening', 'car horn', 'cat', 'chainsaw', 'chirping birds',
        'church bells', 'clapping', 'clock alarm', 'clock tick', 'coughing', 'cow', 'crackling fire', 'crickets',
        'crow', 'crying baby', 'dog', 'door wood creaks', 'door wood knock', 'drinking sipping', 'engine', 'fireworks',
        'footsteps', 'frog', 'glass breaking', 'hand saw', 'helicopter', 'hen', 'insects', 'keyboard typing',
        'laughing', 'mouse click', 'pig', 'pouring water', 'rain', 'rooster', 'sea waves', 'sheep', 'siren',
        'sneezing', 'snoring', 'thunderstorm', 'toilet flush', 'train', 'vacuum cleaner', 'washing machine',
        'water drops', 'wind'
    ),
    'VGGSound': (
        'air conditioning noise', 'air horn', 'airplane', 'airplane flyby', 'alarm clock ringing',
        'alligators, crocodiles hissing', 'ambulance siren', 'arc welding', 'baby babbling', 'baby crying',
        'baby laughter', 'baltimore oriole calling', 'barn swallow calling', 'basketball bounce',
        'bathroom ventilation fan running', 'beat boxing', 'bee, wasp, etc. buzzing', 'bird chirping, tweeting',
        'bird squawking', 'bird wings flapping', 'black capped chickadee calling', 'blowtorch igniting',
        'bouncing on trampoline', 'bowling impact', 'bull bellowing', 'canary calling', 'cap gun shooting',
        'car engine idling', 'car engine knocking', 'car engine starting', 'car passing by', 'cat caterwauling',
        'cat growling', 'cat hissing', 'cat meowing', 'cat purring', 'cattle mooing', 'cattle, bovinae cowbell',
        'cell phone buzzing', 'chainsawing trees', 'cheetah chirrup', 'chicken clucking', 'chicken crowing',
        'child singing', 'child speech, kid speaking', 'children shouting', 'chimpanzee pant-hooting',
        'chinchilla barking', 'chipmunk chirping', 'chopping food', 'chopping wood', 'church bell ringing',
        'civil defense siren', 'cow lowing', 'coyote howling', 'cricket chirping', 'crow cawing', 'cuckoo bird calling',
        'cupboard opening or closing', 'cutting hair with electric trimmers', 'dinosaurs bellowing', 'disc scratching',
        'dog barking', 'dog baying', 'dog bow-wow', 'dog growling', 'dog howling', 'dog whimpering',
        'donkey, ass braying', 'door slamming', 'driving buses', 'driving motorcycle', 'driving snowmobile',
        'duck quacking', 'eagle screaming', 'eating with cutlery', 'electric grinder grinding',
        'electric shaver, electric razor shaving', 'elephant trumpeting', 'eletric blender running', 'elk bugling',
        'engine accelerating, revving, vroom', 'female singing', 'female speech, woman speaking', 'ferret dooking',
        'fire crackling', 'fire truck siren', 'fireworks banging', 'firing cannon', 'firing muskets',
        'fly, housefly buzzing', 'foghorn', 'footsteps on snow', 'forging swords', 'fox barking', 'francolin calling',
        'frog croaking', 'gibbon howling', 'goat bleating', 'golf driving', 'goose honking', 'hail',
        'hair dryer drying', 'hammering nails', 'heart sounds, heartbeat', 'hedge trimmer running', 'helicopter',
        'horse clip-clop', 'horse neighing', 'ice cracking', 'ice cream truck, ice cream van', 'lathe spinning',
        'lawn mowing', 'lighting firecrackers', 'lions growling', 'lions roaring', 'lip smacking',
        'machine gun shooting', 'magpie calling', 'male singing', 'male speech, man speaking', 'metronome',
        'missile launch', 'mosquito buzzing', 'motorboat, speedboat acceleration', 'mouse clicking', 'mouse pattering',
        'mouse squeaking', 'mynah bird singing', 'ocean burbling', 'opening or closing car doors',
        'opening or closing car electric windows', 'opening or closing drawers', 'orchestra', 'otter growling',
        'owl hooting', 'parrot talking', 'penguins braying', 'people babbling', 'people battle cry',
        'people belly laughing', 'people booing', 'people burping', 'people cheering', 'people clapping',
        'people coughing', 'people crowd', 'people eating', 'people eating apple', 'people eating crisps',
        'people eating noodle', 'people farting', 'people finger snapping', 'people gargling', 'people giggling',
        'people hiccup', 'people humming', 'people marching', 'people nose blowing', 'people running',
        'people screaming', 'people shuffling', 'people slapping', 'people slurping', 'people sneezing',
        'people sniggering', 'people sobbing', 'people whispering', 'people whistling', 'pheasant crowing',
        'pig oinking', 'pigeon, dove cooing', 'planing timber', 'plastic bottle crushing', 'playing accordion',
        'playing acoustic guitar', 'playing badminton', 'playing bagpipes', 'playing banjo', 'playing bass drum',
        'playing bass guitar', 'playing bassoon', 'playing bongo', 'playing bugle', 'playing castanets',
        'playing cello', 'playing clarinet', 'playing congas', 'playing cornet', 'playing cymbal', 'playing darts',
        'playing didgeridoo', 'playing djembe', 'playing double bass', 'playing drum kit', 'playing electric guitar',
        'playing electronic organ', 'playing erhu', 'playing flute', 'playing french horn', 'playing glockenspiel',
        'playing gong', 'playing guiro', 'playing hammond organ', 'playing harmonica', 'playing harp',
        'playing harpsichord', 'playing hockey', 'playing lacrosse', 'playing mandolin', 'playing marimba, xylophone',
        'playing oboe', 'playing piano', 'playing saxophone', 'playing shofar', 'playing sitar', 'playing snare drum',
        'playing squash', 'playing steel guitar, slide guitar', 'playing steelpan', 'playing synthesizer',
        'playing tabla', 'playing table tennis', 'playing tambourine', 'playing tennis', 'playing theremin',
        'playing timbales', 'playing timpani', 'playing trombone', 'playing trumpet', 'playing tuning fork',
        'playing tympani', 'playing ukulele', 'playing vibraphone', 'playing violin, fiddle', 'playing volleyball',
        'playing washboard', 'playing zither', 'police car (siren)', 'police radio chatter', 'popping popcorn',
        'printer printing', 'pumping water', 'race car, auto racing', 'railroad car, train wagon', 'raining', 'rapping',
        'reversing beeps', 'ripping paper', 'roller coaster running', 'rope skipping', 'rowboat, canoe, kayak rowing',
        'running electric fan', 'sailing', 'scuba diving', 'sea lion barking', 'sea waves', 'sharpen knife',
        'sheep bleating', 'shot football', 'singing bowl', 'singing choir', 'skateboarding', 'skidding', 'skiing',
        'sliding door', 'sloshing water', 'slot machine', 'smoke detector beeping', 'snake hissing', 'snake rattling',
        'splashing water', 'spraying water', 'squishing water', 'stream burbling', 'strike lighter', 'striking bowling',
        'striking pool', 'subway, metro, underground', 'swimming', 'tap dancing', 'tapping guitar',
        'telephone bell ringing', 'thunder', 'toilet flushing', 'tornado roaring', 'tractor digging', 'train horning',
        'train wheels squealing', 'train whistling', 'turkey gobbling', 'typing on computer keyboard',
        'typing on typewriter', 'underwater bubbling', 'using sewing machines', 'vacuum cleaner cleaning floors',
        'vehicle horn, car horn, honking', 'volcano explosion', 'warbler chirping', 'waterfall burbling',
        'whale calling', 'wind chime', 'wind noise', 'wind rustling leaves', 'wood thrush calling',
        'woodpecker pecking tree', 'writing on blackboard with chalk', 'yodelling', 'zebra braying'
    )
}
|
RAG/Knowledge_Database/languagebind_main/a_cls/zeroshot_cls.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import json
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
from training.distributed import is_master
|
| 6 |
+
from .zero_shot import zero_shot_eval
|
| 7 |
+
|
| 8 |
+
try:
|
| 9 |
+
import wandb
|
| 10 |
+
except ImportError:
|
| 11 |
+
wandb = None
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def evaluate_a_cls(model, data, epoch, args, tb_writer=None):
    """Run zero-shot audio-classification evaluation and log the results.

    Only the master process evaluates; other ranks return an empty dict.
    Metrics are logged, optionally written to TensorBoard, appended to a
    per-dataset ``results.jsonl``, and reported to wandb when enabled.

    Args:
        model: Model under evaluation (switched to eval mode here).
        data: Dataset/dataloader mapping consumed by ``zero_shot_eval``.
        epoch: Current epoch number, used for logging keys.
        args: Parsed training arguments (val_a_cls_data, save_logs, wandb, ...).
        tb_writer: Optional TensorBoard SummaryWriter.

    Returns:
        Dict of metric name -> value (empty on non-master ranks or when
        zero-shot evaluation produced nothing).
    """
    metrics = {}
    if not is_master(args):
        return metrics
    model.eval()

    metrics.update(zero_shot_eval(model, data, epoch, args))

    if not metrics:
        return metrics

    summary = "\t".join(f"{name}: {round(value, 4):.4f}" for name, value in metrics.items())
    logging.info(f"Eval Epoch: {epoch} " + summary)

    if args.save_logs:
        # NOTE(review): the [0] index suggests val_a_cls_data is a list/sequence
        # here (first dataset name), while zero_shot.py treats it as a string —
        # confirm against the caller.
        dataset_tag = args.val_a_cls_data[0].lower()
        for name, value in metrics.items():
            if tb_writer is not None:
                tb_writer.add_scalar(f"val/a_cls/{dataset_tag}/{name}", value, epoch)
        args.a_cls_output_dir = os.path.join(args.log_base_path, f'a_cls/{dataset_tag}')
        os.makedirs(args.a_cls_output_dir, exist_ok=True)
        with open(os.path.join(args.a_cls_output_dir, "results.jsonl"), "a+") as f:
            f.write(json.dumps(metrics))
            f.write("\n")

    if args.wandb:
        assert wandb is not None, 'Please install wandb.'
        for name, value in metrics.items():
            wandb.log({f"val/{name}": value, 'epoch': epoch})

    return metrics
|
RAG/Knowledge_Database/languagebind_main/al_ret/data_dataloaders.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import torch
|
| 3 |
+
from torch.utils.data import DataLoader
|
| 4 |
+
|
| 5 |
+
from data.build_datasets import get_data
|
| 6 |
+
from data.process_audio import get_audio_transform
|
| 7 |
+
from .dataloader_msrvtt_retrieval import MSRVTT_DataLoader
|
| 8 |
+
|
| 9 |
+
def dataloader_msrvtt_test(args, tokenizer, subset="test"):
    """Build the MSRVTT evaluation DataLoader.

    Args:
        args: Parsed arguments supplying val_csv, features_path, max_words,
            batch_size_val and num_thread_reader.
        tokenizer: Text tokenizer passed through to the dataset.
        subset: Unused; kept for registry-signature compatibility.

    Returns:
        Tuple of (DataLoader over the test set, number of samples).
    """
    dataset = MSRVTT_DataLoader(
        csv_path=args.val_csv,
        features_path=args.features_path,
        max_words=args.max_words,
        tokenizer=tokenizer,
        transform=get_audio_transform(args)
    )
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        shuffle=False,
        drop_last=False,
    )
    return loader, len(dataset)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# Registry mapping dataset name -> per-split dataloader builders
# (None marks a split with no loader defined).
DATALOADER_DICT = {
    "msrvtt": {"val": dataloader_msrvtt_test, "test": None},
}
|
RAG/Knowledge_Database/languagebind_main/al_ret/dataloader_msrvtt_retrieval.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import absolute_import
|
| 2 |
+
from __future__ import division
|
| 3 |
+
from __future__ import unicode_literals
|
| 4 |
+
from __future__ import print_function
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
|
| 8 |
+
import torchaudio
|
| 9 |
+
from torch.utils.data import Dataset
|
| 10 |
+
import numpy as np
|
| 11 |
+
import pandas as pd
|
| 12 |
+
from collections import defaultdict
|
| 13 |
+
import json
|
| 14 |
+
import random
|
| 15 |
+
|
| 16 |
+
from torchvision.io import read_video
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class MSRVTT_DataLoader(Dataset):
    """MSRVTT audio-text retrieval dataset loader.

    Args:
        csv_path: CSV file with 'video_id' and 'sentence' columns.
        features_path: Directory holding per-video media files
            (``<video_id>.mp4`` / ``.webm`` with a pre-extracted ``.wav``).
        tokenizer: Callable mapping a sentence to an indexable pair of
            (input_ids, attention_mask) arrays/tensors.
        transform: Audio transform; must expose ``num_mel_bins`` and
            ``target_length``. (Default of 77 looks like a copy-paste from a
            max-token-length parameter — callers always pass a transform.)
        max_words: Fixed token-sequence length per caption.
    """
    def __init__(
            self,
            csv_path,
            features_path,
            tokenizer,
            transform=77,
            max_words=30,
    ):
        self.data = pd.read_csv(csv_path)
        self.features_path = features_path
        self.max_words = max_words
        self.tokenizer = tokenizer
        self.transform = transform
        self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>",
                              "MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"}

    def __len__(self):
        return len(self.data)

    def _get_text(self, video_id, sentence):
        """Tokenize *sentence* for *video_id*.

        Returns:
            (pairs_text, pairs_mask, pairs_segment, choice_video_ids) where
            the first three are int64 arrays of shape (1, max_words).
        """
        choice_video_ids = [video_id]
        k = len(choice_video_ids)

        # BUGFIX: np.long / np.float aliases were removed in NumPy 1.24
        # (deprecated since 1.20); use explicit dtypes instead.
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_mask = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_segment = np.zeros((k, self.max_words), dtype=np.int64)

        for i, video_id in enumerate(choice_video_ids):
            output = self.tokenizer(sentence)
            input_ids = output[0].squeeze()
            input_mask = output[1].squeeze()
            segment_ids = [0] * len(input_ids)

            # Pad up to max_words. NOTE(review): .append only exists on
            # list-like tokenizer outputs; with tensors this loop is only
            # safe because tokenizers here emit exactly max_words tokens.
            while len(input_ids) < self.max_words:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert len(input_ids) == self.max_words
            assert len(input_mask) == self.max_words
            assert len(segment_ids) == self.max_words

            pairs_text[i] = np.array(input_ids)
            pairs_mask[i] = np.array(input_mask)
            pairs_segment[i] = np.array(segment_ids)

        return pairs_text, pairs_mask, pairs_segment, choice_video_ids

    def _get_rawvideo(self, choice_video_ids):
        """Load and transform the audio track for a single video id.

        The audio is read from a pre-extracted ``.wav`` file sitting next to
        the video container.
        """
        assert len(choice_video_ids) == 1
        for i, video_id in enumerate(choice_video_ids):
            video_path = os.path.join(self.features_path, "{}.mp4".format(video_id))
            # Fall back to .webm containers (YoucookII-style format).
            if not os.path.exists(video_path):
                video_path = video_path.replace(".mp4", ".webm")

            audio_data = torchaudio.load(video_path.replace('mp4', 'wav'))
            audio_data = self.transform(audio_data)
        return audio_data

    def __getitem__(self, idx):
        video_id = self.data['video_id'].values[idx]
        sentence = self.data['sentence'].values[idx]

        pairs_text, pairs_mask, pairs_segment, choice_video_ids = self._get_text(video_id, sentence)
        audio_data = self._get_rawvideo(choice_video_ids)
        return audio_data, pairs_text, pairs_mask
|
RAG/Knowledge_Database/languagebind_main/al_ret/datasets.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os.path
|
| 3 |
+
import random
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import torch
|
| 8 |
+
from torch.utils.data import Dataset
|
| 9 |
+
from data.build_datasets import DataInfo
|
| 10 |
+
from open_clip import get_input_dtype, get_tokenizer
|
| 11 |
+
from open_clip.factory import HF_HUB_PREFIX
|
| 12 |
+
from data.process_audio import get_audio_transform, torchaudio_loader
|
| 13 |
+
|
| 14 |
+
class Audiocaps_dataset(Dataset):
    """AudioCaps test-split dataset for audio-text retrieval.

    Reads ``<data_path>/audiocaps_test.tsv`` (tab-separated; second column
    is the raw audio id, third column the caption) and enumerates every
    (audio, caption) pair — one audio clip can carry several captions.
    """
    def __init__(self, data_path, transform, loader, tokenizer):
        super(Audiocaps_dataset, self).__init__()
        self.audio_root = data_path
        raw_meta = pd.read_csv(f'{self.audio_root}/audiocaps_test.tsv', delimiter='\t').values
        audio_ids = list(set(raw_meta[:, 1].tolist()))
        # Group captions by raw audio id, preserving file row order.
        captions = {}
        for row in raw_meta:
            captions.setdefault(row[1], []).append(row[2])

        self.sample_len = 0
        self.sentences_dict = {}
        self.cut_off_points = []
        for audio_id in audio_ids:
            assert audio_id in captions
            for cap_txt in captions[audio_id]:
                # The first 10 characters of the raw id are stripped;
                # presumably a fixed prefix before the on-disk file name —
                # confirm against the TSV contents.
                self.sentences_dict[len(self.sentences_dict)] = (audio_id[10:], cap_txt)
            self.cut_off_points.append(len(self.sentences_dict))

        self.multi_sentence_per_audio = True    # !!! important tag for eval
        if self.multi_sentence_per_audio:
            self.sentence_num = len(self.sentences_dict)
            self.audio_num = len(audio_ids)
            assert len(self.cut_off_points) == self.audio_num
            print("Sentence number: {}".format(self.sentence_num))
            print("Video number: {}".format(self.audio_num))

        self.sample_len = len(self.sentences_dict)

        self.transform = transform
        self.torchaudio_loader = loader
        self.tokenizer = tokenizer

    def __len__(self):
        return self.sample_len

    def __getitem__(self, idx):
        audiocap_id, caption = self.sentences_dict[idx]

        audio_path = os.path.join(self.audio_root, audiocap_id)
        audio_data = self.transform(self.torchaudio_loader(audio_path))

        input_ids, attention_mask = self.tokenizer(caption)
        return audio_data, input_ids.squeeze(), attention_mask.squeeze()
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class Clotho_dataset(Dataset):
    """Clotho audio-text retrieval evaluation set.

    Reads the official caption CSV and flattens it into one entry per
    (audio clip, caption) pair. `cut_off_points` records, for each clip,
    the flattened index just past its last caption so that the evaluator
    can regroup the multiple captions belonging to one clip.
    """

    def __init__(self, data_path, transform, loader, tokenizer):
        super(Clotho_dataset, self).__init__()
        self.audio_root = data_path
        meta_rows = pd.read_csv(f'{self.audio_root}/CLOTHO_retrieval_dataset/clotho_captions_evaluation.csv').values
        clip_ids = meta_rows[:, 0].tolist()
        # first CSV column is the file name; the remaining columns are its captions
        caption_map = {row[:1][0]: row[1:].tolist() for row in meta_rows}

        self.sample_len = 0
        self.sentences_dict = {}
        self.cut_off_points = []
        for clip_id in clip_ids:
            assert clip_id in caption_map
            for caption_text in caption_map[clip_id]:
                self.sentences_dict[len(self.sentences_dict)] = (clip_id, caption_text)
            # flattened index where this clip's caption run ends (exclusive)
            self.cut_off_points.append(len(self.sentences_dict))

        self.multi_sentence_per_audio = True  # !!! important tag for eval
        if self.multi_sentence_per_audio:
            self.sentence_num = len(self.sentences_dict)
            self.audio_num = len(clip_ids)
            assert len(self.cut_off_points) == self.audio_num
            print("Sentence number: {}".format(self.sentence_num))
            print("Video number: {}".format(self.audio_num))

        self.sample_len = len(self.sentences_dict)

        self.transform = transform
        self.torchaudio_loader = loader
        self.tokenizer = tokenizer

    def __len__(self):
        """Number of flattened (clip, caption) pairs."""
        return self.sample_len

    def __getitem__(self, idx):
        """Return (audio_data, input_ids, attention_mask) for pair `idx`."""
        clip_id, caption = self.sentences_dict[idx]

        wav_path = os.path.join(self.audio_root, f'evaluation/{clip_id}')
        waveform = self.torchaudio_loader(wav_path)
        audio_data = self.transform(waveform)

        input_ids, attention_mask = self.tokenizer(caption)
        return audio_data, input_ids.squeeze(), attention_mask.squeeze()
| 116 |
+
|
| 117 |
+
def get_audio_dataset(args):
    """Build the evaluation DataLoader for audio-text retrieval.

    Selects the dataset named by args.val_al_ret_data (case-insensitive:
    'audiocaps' or 'clotho') and wraps it in an unshuffled DataLoader.

    Fixes vs. original: the dataset name is lowercased once instead of per
    branch, and the error message typo ("unsupport") is corrected.

    Raises:
        ValueError: if args.val_al_ret_data names an unknown dataset.
    """
    data_path = args.audio_data_path
    transform = get_audio_transform(args)
    tokenizer = get_tokenizer(HF_HUB_PREFIX + args.model, cache_dir=args.cache_dir)

    dataset_name = args.val_al_ret_data.lower()
    if dataset_name == 'audiocaps':
        dataset = Audiocaps_dataset(data_path, transform=transform, loader=torchaudio_loader, tokenizer=tokenizer)
    elif dataset_name == 'clotho':
        dataset = Clotho_dataset(data_path, transform=transform, loader=torchaudio_loader, tokenizer=tokenizer)
    else:
        raise ValueError(f'unsupported dataset {args.val_al_ret_data}')

    # evaluation loader: fixed order, keep the ragged last batch
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.workers,
        shuffle=False,
        drop_last=False,
    )

    return dataloader
RAG/Knowledge_Database/languagebind_main/al_ret/metrics.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import absolute_import
|
| 2 |
+
from __future__ import division
|
| 3 |
+
from __future__ import unicode_literals
|
| 4 |
+
from __future__ import print_function
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
def compute_metrics(x):
    """Compute retrieval recall metrics from a square similarity matrix.

    The ground-truth match for row i is assumed to be x[i, i]; the rank of
    each diagonal entry within its (descending-sorted) row drives R@1/5/10
    and the median/mean rank (1-based).
    """
    neg_sorted = np.sort(-x, axis=1)           # rows sorted by descending similarity
    neg_diag = np.diag(-x)[:, np.newaxis]      # negated ground-truth score per row
    # column position where the sorted row equals the diagonal == rank (0-based)
    ranks = np.where(neg_sorted - neg_diag == 0)[1]
    metrics = {
        'R1': float(np.sum(ranks == 0)) * 100 / len(ranks),
        'R5': float(np.sum(ranks < 5)) * 100 / len(ranks),
        'R10': float(np.sum(ranks < 10)) * 100 / len(ranks),
    }
    metrics['MR'] = np.median(ranks) + 1
    metrics["MedianR"] = metrics['MR']
    metrics["MeanR"] = np.mean(ranks) + 1
    return metrics
| 25 |
+
|
| 26 |
+
def print_computed_metrics(metrics):
    """Pretty-print the retrieval metrics produced by compute_metrics."""
    print('R@1: {:.4f} - R@5: {:.4f} - R@10: {:.4f} - Median R: {}'.format(
        metrics['R1'], metrics['R5'], metrics['R10'], metrics['MR']))
| 32 |
+
|
| 33 |
+
# below two functions directly come from: https://github.com/Deferf/Experiments
|
| 34 |
+
def tensor_text_to_video_metrics(sim_tensor, top_k = [1,5,10]):
    """Recall metrics over a padded, stacked similarity tensor.

    `sim_tensor` is assumed to be (num_clips, max_captions, num_clips):
    one text-to-clip similarity matrix per caption slot, padded with
    inf/NaN entries where a clip has fewer captions — TODO confirm
    against evaluate_al_ret, which builds this layout with -inf padding.
    NOTE: the mutable default `top_k` is only read, never mutated.
    """
    if not torch.is_tensor(sim_tensor):
        sim_tensor = torch.tensor(sim_tensor)

    # Permute sim_tensor so it represents a sequence of text-video similarity matrices.
    # Then obtain the double argsort to position the rank on the diagonal
    stacked_sim_matrices = sim_tensor.permute(1, 0, 2)
    first_argsort = torch.argsort(stacked_sim_matrices, dim = -1, descending= True)
    # argsort of the argsort yields, for each column, its 0-based rank
    second_argsort = torch.argsort(first_argsort, dim = -1, descending= False)

    # Extracts ranks i.e diagonals
    ranks = torch.flatten(torch.diagonal(second_argsort, dim1 = 1, dim2 = 2))

    # Now we need to extract valid ranks, as some belong to inf padding values
    permuted_original_data = torch.flatten(torch.diagonal(sim_tensor, dim1 = 0, dim2 = 2))
    mask = ~ torch.logical_or(torch.isinf(permuted_original_data), torch.isnan(permuted_original_data))
    valid_ranks = ranks[mask]
    # A quick dimension check validates our results, there may be other correctness tests pending
    # Such as dot product localization, but that is for other time.
    #assert int(valid_ranks.shape[0]) == sum([len(text_dict[k]) for k in text_dict])
    if not torch.is_tensor(valid_ranks):
        valid_ranks = torch.tensor(valid_ranks)
    # ranks are 0-based, hence the +1 shift for median/mean reporting
    results = {f"R{k}": float(torch.sum(valid_ranks < k) * 100 / len(valid_ranks)) for k in top_k}
    results["MedianR"] = float(torch.median(valid_ranks + 1))
    results["MeanR"] = float(np.mean(valid_ranks.numpy() + 1))
    results["Std_Rank"] = float(np.std(valid_ranks.numpy() + 1))
    results['MR'] = results["MedianR"]
    return results
| 62 |
+
|
| 63 |
+
def tensor_video_to_text_sim(sim_tensor):
    """Collapse a stacked similarity tensor into a video-to-text matrix.

    Takes the per-video maximum over dim 1 and transposes the result.
    NaN entries are replaced in place with -inf so they never win the max
    (this mutates the input tensor, matching the original behavior).
    """
    if not torch.is_tensor(sim_tensor):
        sim_tensor = torch.tensor(sim_tensor)
    # NaN != NaN, so this mask selects exactly the NaN cells (in place)
    sim_tensor[sim_tensor != sim_tensor] = float('-inf')
    # Forms a similarity matrix for use with rank at k
    best_scores, _ = sim_tensor.max(dim=1, keepdim=True)
    return best_scores.squeeze().T
RAG/Knowledge_Database/languagebind_main/al_ret/precision.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from contextlib import suppress
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def get_autocast(precision):
    """Map a precision flag to a context-manager factory for the forward pass.

    'amp' -> CUDA autocast; 'amp_bfloat16'/'amp_bf16' -> bfloat16 autocast
    (more stable than float16 for CLIP training); anything else -> a no-op
    context manager.
    """
    if precision == 'amp':
        return torch.cuda.amp.autocast
    if precision in ('amp_bfloat16', 'amp_bf16'):
        return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)
    return suppress
RAG/Knowledge_Database/languagebind_main/al_ret/retrieval.py
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import json
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
from training.distributed import is_master
|
| 9 |
+
from .zero_shot import zero_shot_eval
|
| 10 |
+
from .util import parallel_apply
|
| 11 |
+
from .metrics import compute_metrics, tensor_text_to_video_metrics, tensor_video_to_text_sim
|
| 12 |
+
from torch.nn import functional as F
|
| 13 |
+
try:
|
| 14 |
+
import wandb
|
| 15 |
+
except ImportError:
|
| 16 |
+
wandb = None
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
#
|
| 20 |
+
# def evaluate_al_ret(model, data, epoch, args, tb_writer=None):
|
| 21 |
+
# metrics = {}
|
| 22 |
+
# if not is_master(args):
|
| 23 |
+
# return metrics
|
| 24 |
+
# model.eval()
|
| 25 |
+
#
|
| 26 |
+
# zero_shot_metrics = zero_shot_eval(model, data, epoch, args)
|
| 27 |
+
# metrics.update(zero_shot_metrics)
|
| 28 |
+
#
|
| 29 |
+
# if not metrics:
|
| 30 |
+
# return metrics
|
| 31 |
+
#
|
| 32 |
+
# logging.info(
|
| 33 |
+
# f"Eval Epoch: {epoch} "
|
| 34 |
+
# + "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()])
|
| 35 |
+
# )
|
| 36 |
+
#
|
| 37 |
+
# if args.save_logs:
|
| 38 |
+
# for name, val in metrics.items():
|
| 39 |
+
# if tb_writer is not None:
|
| 40 |
+
# tb_writer.add_scalar(f"val/al_ret/{name}", val, epoch)
|
| 41 |
+
# args.al_ret_output_dir = os.path.join(args.log_base_path, 'al_ret')
|
| 42 |
+
# os.makedirs(args.al_ret_output_dir, exist_ok=True)
|
| 43 |
+
# with open(os.path.join(args.al_ret_output_dir, "results.jsonl"), "a+") as f:
|
| 44 |
+
# f.write(json.dumps(metrics))
|
| 45 |
+
# f.write("\n")
|
| 46 |
+
#
|
| 47 |
+
# if args.wandb:
|
| 48 |
+
# assert wandb is not None, 'Please install wandb.'
|
| 49 |
+
# for name, val in metrics.items():
|
| 50 |
+
# wandb.log({f"val/{name}": val, 'epoch': epoch})
|
| 51 |
+
#
|
| 52 |
+
# return metrics
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _run_on_single_gpu(model,
|
| 57 |
+
# batch_list_t, batch_list_v,
|
| 58 |
+
batch_sequence_output_list, batch_visual_output_list):
|
| 59 |
+
sim_matrix = []
|
| 60 |
+
for idx1 in range(len(batch_sequence_output_list)):
|
| 61 |
+
# input_mask, segment_ids, *_tmp = b1
|
| 62 |
+
sequence_output = batch_sequence_output_list[idx1]
|
| 63 |
+
each_row = []
|
| 64 |
+
for idx2 in range(len(batch_visual_output_list)):
|
| 65 |
+
# video_mask, *_tmp = b2
|
| 66 |
+
visual_output = batch_visual_output_list[idx2]
|
| 67 |
+
# b1b2_logits, *_tmp = model.get_similarity_logits(sequence_output, visual_output, input_mask, video_mask,
|
| 68 |
+
# loose_type=model.loose_type)
|
| 69 |
+
# logging.info(f"{model.logit_scale.device}, {visual_output.device}, {sequence_output.device}")
|
| 70 |
+
b1b2_logits = model.logit_scale * sequence_output @ visual_output.T
|
| 71 |
+
# print(model.logit_scale.device, visual_output.device, sequence_output.device)
|
| 72 |
+
# logging.info(f"{b1b2_logits.shape}, {b1b2_logits.device}")
|
| 73 |
+
b1b2_logits = b1b2_logits.cpu().detach().numpy()
|
| 74 |
+
each_row.append(b1b2_logits)
|
| 75 |
+
each_row = np.concatenate(tuple(each_row), axis=-1)
|
| 76 |
+
sim_matrix.append(each_row)
|
| 77 |
+
return sim_matrix
|
| 78 |
+
|
| 79 |
+
def evaluate_al_ret(model, data, epoch, args, tb_writer=None):
    """Evaluate text->audio retrieval on the single test dataloader in `data`.

    Runs only on the master process and only on epochs selected by
    args.val_frequency. Caches text and audio embeddings over the loader,
    builds the full similarity matrix (optionally sharded across GPUs),
    computes recall metrics, and logs/saves them.

    NOTE(review): variable names say "video"/"visual" but carry audio
    features here — the code was adapted from a video-retrieval pipeline.
    """
    if is_master(args) and (args.val_frequency and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)):
        val_al_ret_data = list(data.keys())
        assert len(val_al_ret_data) == 1  # exactly one retrieval dataset at a time
        val_al_ret_data = val_al_ret_data[0]
        test_dataloader = data[val_al_ret_data]
        device = model.device
        n_gpu = torch.cuda.device_count()
        logging.info(f"\nEval Epoch: {epoch}, eval Audio-Text Retrieval under {val_al_ret_data.upper()} test data")
        if hasattr(model, 'module'):
            # unwrap DistributedDataParallel so encode_* is reachable
            model = model.module.to(device)
        else:
            model = model.to(device)
        # #################################################################
        ## below variables are used to multi-sentences retrieval
        # multi_sentence_: important tag for eval
        # cut_off_points: used to tag the label when calculate the metric
        # sentence_num: used to cut the sentence representation
        # video_num: used to cut the video representation
        # #################################################################
        multi_sentence_ = False
        cut_off_points_, sentence_num_, video_num_ = [], -1, -1
        if hasattr(test_dataloader.dataset, 'multi_sentence_per_audio') and test_dataloader.dataset.multi_sentence_per_audio:
            multi_sentence_ = True
            cut_off_points_ = test_dataloader.dataset.cut_off_points
            sentence_num_ = test_dataloader.dataset.sentence_num
            video_num_ = test_dataloader.dataset.audio_num
            # convert exclusive end indices into inclusive last-caption indices
            cut_off_points_ = [itm - 1 for itm in cut_off_points_]

        if multi_sentence_:
            print("Eval under the multi-sentence per audio clip setting.")
            print("sentence num: {}, video num: {}".format(sentence_num_, video_num_))
            logging.info("Eval under the multi-sentence per audio clip setting.")
            logging.info("sentence num: {}, video num: {}".format(sentence_num_, video_num_))

        model.eval()
        with torch.no_grad():
            batch_sequence_output_list, batch_visual_output_list = [], []
            total_video_num = 0

            # ----------------------------
            # 1. cache the features
            # ----------------------------
            for bid, batch in enumerate(test_dataloader):
                video, input_ids, attention_mask = batch
                input_ids = input_ids.squeeze().to(device)
                attention_mask = attention_mask.squeeze().to(device)
                video = video.float().to(device)

                if multi_sentence_:
                    # multi-sentences retrieval means: one clip has two or more descriptions.
                    b, *_t = video.shape
                    sequence_output = model.encode_text(input_ids, attention_mask)
                    batch_sequence_output_list.append(sequence_output)
                    # encode each audio clip only once: keep rows whose global
                    # index is a clip's last-caption position
                    s_, e_ = total_video_num, total_video_num + b
                    filter_inds = [itm - s_ for itm in cut_off_points_ if itm >= s_ and itm < e_]

                    if len(filter_inds) > 0:
                        video = video[filter_inds, ...]
                        visual_output = model.encode_image(video)
                        batch_visual_output_list.append(visual_output)
                    total_video_num += b
                else:
                    sequence_output = model.encode_text(input_ids, attention_mask)
                    visual_output = model.encode_image(video)

                    batch_sequence_output_list.append(sequence_output)

                    batch_visual_output_list.append(visual_output)

                print(f"Process {val_al_ret_data.upper()}: {bid}/{len(test_dataloader)}\r", end='')
            # ----------------------------------
            # 2. calculate the similarity
            # ----------------------------------
            n_gpu = torch.cuda.device_count()
            if n_gpu > 1:
                # shard the cached text batches across GPUs; every shard sees
                # all audio features, so each produces full-width rows
                device_ids = list(range(n_gpu))
                batch_t_output_splits = []
                batch_v_output_splits = []
                bacth_len = len(batch_sequence_output_list)
                split_len = (bacth_len + n_gpu - 1) // n_gpu
                for dev_id in device_ids:
                    s_, e_ = dev_id * split_len, (dev_id + 1) * split_len
                    if dev_id == 0:

                        batch_t_output_splits.append(batch_sequence_output_list[s_:e_])
                        batch_v_output_splits.append(batch_visual_output_list)
                    else:
                        devc = torch.device('cuda:{}'.format(str(dev_id)))

                        devc_batch_list = [b.to(devc) for b in batch_sequence_output_list[s_:e_]]
                        batch_t_output_splits.append(devc_batch_list)
                        devc_batch_list = [b.to(devc) for b in batch_visual_output_list]
                        batch_v_output_splits.append(devc_batch_list)
                parameters_tuple_list = [(
                    batch_t_output_splits[dev_id], batch_v_output_splits[dev_id]) for dev_id in device_ids]
                parallel_outputs = parallel_apply(_run_on_single_gpu, model, parameters_tuple_list, device_ids)
                sim_matrix = []
                for idx in range(len(parallel_outputs)):
                    sim_matrix += parallel_outputs[idx]
                sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
            else:
                sim_matrix = _run_on_single_gpu(model,
                                                batch_sequence_output_list, batch_visual_output_list)
                sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
        #####################################################################
        if multi_sentence_:
            # regroup the flat (all_captions x num_clips) matrix into one
            # (max_captions x num_clips) slab per clip, padding with -inf
            logging.info(f"{val_al_ret_data.upper()} before reshape, sim matrix size: {sim_matrix.shape}")
            cut_off_points2len_ = [itm + 1 for itm in cut_off_points_]
            max_length = max([e_-s_ for s_, e_ in zip([0]+cut_off_points2len_[:-1], cut_off_points2len_)])
            sim_matrix_new = []
            for s_, e_ in zip([0] + cut_off_points2len_[:-1], cut_off_points2len_):
                sim_matrix_new.append(np.concatenate((sim_matrix[s_:e_],
                                                      np.full((max_length-e_+s_, sim_matrix.shape[1]), -np.inf)), axis=0))
            sim_matrix = np.stack(tuple(sim_matrix_new), axis=0)
            logging.info(f"{val_al_ret_data.upper()} after reshape, sim matrix size: {sim_matrix.shape}")

            tv_metrics = tensor_text_to_video_metrics(sim_matrix)
        else:
            logging.info(f"{val_al_ret_data.upper()} sim matrix size: {sim_matrix.shape[0]}, {sim_matrix.shape[1]}")
            t2v_sim_matrix = torch.from_numpy(sim_matrix).cuda()
            tv_metrics = compute_metrics(t2v_sim_matrix.cpu().numpy())

        logging.info('\t Length-T: {}, Length-V:{}'.format(len(sim_matrix), len(sim_matrix[0])))

        logging.info(f"{val_al_ret_data.upper()} Text-to-Audio:")
        logging.info('\t>>> R@1: {:.1f} - R@5: {:.1f} - R@10: {:.1f} - Median R: {:.1f} - Mean R: {:.1f}'.
                     format(tv_metrics['R1'], tv_metrics['R5'], tv_metrics['R10'], tv_metrics['MR'], tv_metrics['MeanR']))

        if args.save_logs:
            for name, val in tv_metrics.items():
                if tb_writer is not None:
                    tb_writer.add_scalar(f"val/al_ret/{val_al_ret_data}/t2a/{name}", val, epoch)

            # append this epoch's metrics to a JSONL log under the run directory
            args.al_ret_output_dir = os.path.join(args.log_base_path, f'al_ret/{val_al_ret_data}')
            os.makedirs(args.al_ret_output_dir, exist_ok=True)
            with open(os.path.join(args.al_ret_output_dir, "results.jsonl"), "a+") as f:
                f.write(json.dumps({'t2a': tv_metrics}))
                f.write("\n")
RAG/Knowledge_Database/languagebind_main/al_ret/util.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import threading
|
| 4 |
+
from torch._utils import ExceptionWrapper
|
| 5 |
+
import logging
|
| 6 |
+
|
| 7 |
+
def get_a_var(obj):
    """Return the first torch.Tensor found in `obj`, or None.

    Searches recursively through lists, tuples, and dict items; used to
    discover which CUDA device a worker's input lives on.
    """
    if isinstance(obj, torch.Tensor):
        return obj

    if isinstance(obj, (list, tuple)):
        for candidate in map(get_a_var, obj):
            if isinstance(candidate, torch.Tensor):
                return candidate
    if isinstance(obj, dict):
        for candidate in map(get_a_var, obj.items()):
            if isinstance(candidate, torch.Tensor):
                return candidate
    return None
| 20 |
+
|
| 21 |
+
def parallel_apply(fct, model, inputs, device_ids):
    """Replicate `model` onto `device_ids` and run `fct(replica, *input)` on each.

    One thread per replica; results are collected in submission order.
    Exceptions raised inside a worker are captured as ExceptionWrapper and
    re-raised on the calling thread when the results are gathered.

    Returns a list with one output per input, in order.
    """
    modules = nn.parallel.replicate(model, device_ids)
    assert len(modules) == len(inputs)
    lock = threading.Lock()
    results = {}
    # propagate the caller's grad mode into every worker thread
    grad_enabled = torch.is_grad_enabled()

    def _worker(i, module, input):
        torch.set_grad_enabled(grad_enabled)
        # run on whichever device this worker's tensors already live on
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                # this also avoids accidental slicing of `input` if it is a Tensor
                if not isinstance(input, (list, tuple)):
                    input = (input,)
                output = fct(module, *input)
            with lock:
                results[i] = output
        except Exception:
            with lock:
                results[i] = ExceptionWrapper(where="in replica {} on device {}".format(i, device))

    if len(modules) > 1:
        threads = [threading.Thread(target=_worker, args=(i, module, input))
                   for i, (module, input) in enumerate(zip(modules, inputs))]

        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # single replica: no threading overhead needed
        _worker(0, modules[0], inputs[0])

    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
| 61 |
+
|
| 62 |
+
def get_logger(filename=None):
    """Configure and return the shared 'logger' instance.

    Root logging is configured at INFO level with a timestamped format;
    when `filename` is given, a DEBUG-level file handler is also attached
    to the root logger.
    """
    logger = logging.getLogger('logger')
    logger.setLevel(logging.DEBUG)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)
    if filename is not None:
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logging.getLogger().addHandler(file_handler)
    return logger
RAG/Knowledge_Database/languagebind_main/al_ret/zero_shot.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
|
| 8 |
+
from open_clip import get_input_dtype, get_tokenizer
|
| 9 |
+
from open_clip.factory import HF_HUB_PREFIX
|
| 10 |
+
from .precision import get_autocast
|
| 11 |
+
|
| 12 |
+
def compute_metrics(x):
    """Compute retrieval recall metrics from a square similarity matrix.

    The ground-truth match for row i is x[i, i]; its rank within the
    descending-sorted row drives R@1/5/10 plus 1-based median/mean rank.
    NOTE(review): duplicated from al_ret/metrics.py — keep the two in sync.
    """
    sx = np.sort(-x, axis=1)
    d = np.diag(-x)
    d = d[:, np.newaxis]
    ind = sx - d
    # column index where the sorted row equals the diagonal == 0-based rank
    ind = np.where(ind == 0)
    ind = ind[1]
    metrics = {}
    metrics['R1'] = float(np.sum(ind == 0)) * 100 / len(ind)
    metrics['R5'] = float(np.sum(ind < 5)) * 100 / len(ind)
    metrics['R10'] = float(np.sum(ind < 10)) * 100 / len(ind)
    metrics['MR'] = np.median(ind) + 1
    metrics["MedianR"] = metrics['MR']
    metrics["MeanR"] = np.mean(ind) + 1
    # metrics["cols"] = [int(i) for i in list(ind)]
    return metrics
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _run_on_single_gpu(model, batch_sequence_output_list, batch_visual_output_list):
|
| 31 |
+
sim_matrix = []
|
| 32 |
+
logit_scale = model.logit_scale.exp()
|
| 33 |
+
for idx1, sequence_output in enumerate(batch_sequence_output_list):
|
| 34 |
+
each_row = []
|
| 35 |
+
for idx2, visual_output in enumerate(batch_visual_output_list):
|
| 36 |
+
b1b2_logits = logit_scale * torch.matmul(sequence_output, visual_output.t())
|
| 37 |
+
b1b2_logits = b1b2_logits.cpu().detach().numpy()
|
| 38 |
+
each_row.append(b1b2_logits)
|
| 39 |
+
each_row = np.concatenate(tuple(each_row), axis=-1)
|
| 40 |
+
sim_matrix.append(each_row)
|
| 41 |
+
return sim_matrix
|
| 42 |
+
|
| 43 |
+
def run(model, dataloader, args):
    """Cache text and audio embeddings over `dataloader`, then return the
    full (num_texts x num_audios) scaled similarity matrix as numpy."""
    autocast = get_autocast(args.precision)
    input_dtype = get_input_dtype(args.precision)

    with torch.no_grad():
        text_feats_list, audio_feats_list = [], []
        for images, input_ids, attention_mask in tqdm(dataloader, unit_scale=args.batch_size):
            images = images.to(device=args.device, dtype=input_dtype)
            images = images.unsqueeze(2)
            input_ids = input_ids.squeeze().to(args.device)
            attention_mask = attention_mask.squeeze().to(args.device)

            with autocast():
                # encode the batch and stash both modalities for the final matmul
                text_feats_list.append(model.encode_text(input_ids, attention_mask))
                audio_feats_list.append(model.encode_image(images))
        sim_matrix = _run_on_single_gpu(model, text_feats_list, audio_feats_list)
        sim_matrix = np.concatenate(tuple(sim_matrix), axis=0)
    return sim_matrix
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def zero_shot_eval(model, data, epoch, args):
    """Run zero-shot audio-text retrieval evaluation for the single dataset in `data`.

    Temporarily points args.val_al_ret_data at the dataset key and ALWAYS
    restores the caller's original value — the original code leaked the
    override on every early-return path, which could corrupt later logic
    that reads args.val_al_ret_data.

    Returns the metrics dict from compute_metrics, or {} when evaluation
    is skipped for this epoch.
    """
    temp_val_al_ret_data = args.val_al_ret_data
    args.val_al_ret_data = list(data.keys())
    assert len(args.val_al_ret_data) == 1
    args.val_al_ret_data = args.val_al_ret_data[0]

    try:
        if args.val_al_ret_data not in data:
            return {}
        if args.zeroshot_frequency == 0:
            return {}
        if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs:
            return {}
        if args.distributed and not args.horovod:
            # unwrap DDP so encode_text/encode_image are reachable
            model = model.module

        logging.info(f'Starting zero-shot {args.val_al_ret_data.upper()}.')

        results = {}
        if args.val_al_ret_data in data:
            logit_matrix = run(model, data[args.val_al_ret_data].dataloader, args)
            results = compute_metrics(logit_matrix)

        logging.info(f'Finished zero-shot {args.val_al_ret_data.upper()}.')
        return results
    finally:
        # hand the caller's setting back, even on the skip paths
        args.val_al_ret_data = temp_val_al_ret_data
RAG/Knowledge_Database/languagebind_main/d_cls/cp_zero_shot_metadata.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
import pandas as pd
|
| 4 |
+
|
| 5 |
+
# The 80 prompt templates used by OpenAI to build CLIP's zero-shot ImageNet
# classifier.  Each entry maps a class name `c` to one natural-language prompt;
# text embeddings of all prompts for a class are averaged into one classifier
# weight vector.
OPENAI_IMAGENET_TEMPLATES = (
    lambda c: f'a bad photo of a {c}.',
    lambda c: f'a photo of many {c}.',
    lambda c: f'a sculpture of a {c}.',
    lambda c: f'a photo of the hard to see {c}.',
    lambda c: f'a low resolution photo of the {c}.',
    lambda c: f'a rendering of a {c}.',
    lambda c: f'graffiti of a {c}.',
    lambda c: f'a bad photo of the {c}.',
    lambda c: f'a cropped photo of the {c}.',
    lambda c: f'a tattoo of a {c}.',
    lambda c: f'the embroidered {c}.',
    lambda c: f'a photo of a hard to see {c}.',
    lambda c: f'a bright photo of a {c}.',
    lambda c: f'a photo of a clean {c}.',
    lambda c: f'a photo of a dirty {c}.',
    lambda c: f'a dark photo of the {c}.',
    lambda c: f'a drawing of a {c}.',
    lambda c: f'a photo of my {c}.',
    lambda c: f'the plastic {c}.',
    lambda c: f'a photo of the cool {c}.',
    lambda c: f'a close-up photo of a {c}.',
    lambda c: f'a black and white photo of the {c}.',
    lambda c: f'a painting of the {c}.',
    lambda c: f'a painting of a {c}.',
    lambda c: f'a pixelated photo of the {c}.',
    lambda c: f'a sculpture of the {c}.',
    lambda c: f'a bright photo of the {c}.',
    lambda c: f'a cropped photo of a {c}.',
    lambda c: f'a plastic {c}.',
    lambda c: f'a photo of the dirty {c}.',
    lambda c: f'a jpeg corrupted photo of a {c}.',
    lambda c: f'a blurry photo of the {c}.',
    lambda c: f'a photo of the {c}.',
    lambda c: f'a good photo of the {c}.',
    lambda c: f'a rendering of the {c}.',
    lambda c: f'a {c} in a video game.',
    lambda c: f'a photo of one {c}.',
    lambda c: f'a doodle of a {c}.',
    lambda c: f'a close-up photo of the {c}.',
    lambda c: f'a photo of a {c}.',
    lambda c: f'the origami {c}.',
    lambda c: f'the {c} in a video game.',
    lambda c: f'a sketch of a {c}.',
    lambda c: f'a doodle of the {c}.',
    lambda c: f'a origami {c}.',
    lambda c: f'a low resolution photo of a {c}.',
    lambda c: f'the toy {c}.',
    lambda c: f'a rendition of the {c}.',
    lambda c: f'a photo of the clean {c}.',
    lambda c: f'a photo of a large {c}.',
    lambda c: f'a rendition of a {c}.',
    lambda c: f'a photo of a nice {c}.',
    lambda c: f'a photo of a weird {c}.',
    lambda c: f'a blurry photo of a {c}.',
    lambda c: f'a cartoon {c}.',
    lambda c: f'art of a {c}.',
    lambda c: f'a sketch of the {c}.',
    lambda c: f'a embroidered {c}.',
    lambda c: f'a pixelated photo of a {c}.',
    lambda c: f'itap of the {c}.',
    lambda c: f'a jpeg corrupted photo of the {c}.',
    lambda c: f'a good photo of a {c}.',
    lambda c: f'a plushie {c}.',
    lambda c: f'a photo of the nice {c}.',
    lambda c: f'a photo of the small {c}.',
    lambda c: f'a photo of the weird {c}.',
    lambda c: f'the cartoon {c}.',
    lambda c: f'art of the {c}.',
    lambda c: f'a drawing of the {c}.',
    lambda c: f'a photo of the large {c}.',
    lambda c: f'a black and white photo of a {c}.',
    lambda c: f'the plushie {c}.',
    lambda c: f'a dark photo of a {c}.',
    lambda c: f'itap of a {c}.',
    lambda c: f'graffiti of the {c}.',
    lambda c: f'a toy {c}.',
    lambda c: f'itap of my {c}.',
    lambda c: f'a photo of a cool {c}.',
    lambda c: f'a photo of a small {c}.',
    lambda c: f'a tattoo of the {c}.',
)


# a much smaller subset of above prompts
# from https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb
SIMPLE_IMAGENET_TEMPLATES = (
    lambda c: f'itap of a {c}.',
    lambda c: f'a bad photo of the {c}.',
    lambda c: f'a origami {c}.',
    lambda c: f'a photo of the large {c}.',
    lambda c: f'a {c} in a video game.',
    lambda c: f'art of the {c}.',
    lambda c: f'a photo of the small {c}.',
)


# Intentionally empty here: ImageNet class names are not used for depth/scene
# classification; the name is kept for interface parity with open_clip metadata.
IMAGENET_CLASSNAMES = (
)


# Scene-category labels per depth-classification evaluation dataset.
CLASSNAMES = {
    'NYUV2': (
        "bathroom", "bedroom", "bookstore", "classroom", "dining room",
        "home office", "kitchen", "living room", "office", "others"
    ),
    'SUNRGBD': (
        "bathroom", "bedroom", "classroom", "computer room", "conference room", "corridor", "dining area",
        "dining room", "discussion area", "furniture store", "home office", "kitchen", "lab", "lecture theatre",
        "library", "living room", "office", "rest space", "study space"
    ),
}
|
RAG/Knowledge_Database/languagebind_main/d_cls/datasets.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import torch
|
| 3 |
+
|
| 4 |
+
from data.build_datasets import DataInfo
|
| 5 |
+
from data.process_depth import get_depth_transform, opencv_loader
|
| 6 |
+
from torchvision import datasets
|
| 7 |
+
|
| 8 |
+
def get_depth_dataset(args):
    """Build the evaluation dataloader for zero-shot depth classification.

    Loads an ImageFolder-style directory of depth maps through OpenCV and
    wraps it in a plain, unsampled DataLoader.

    Args:
        args: parsed CLI namespace; reads ``depth_data_path``, ``batch_size``
            and ``workers``.

    Returns:
        DataInfo holding the dataloader (no distributed sampler).
    """
    root = args.depth_data_path
    depth_transform = get_depth_transform(args)
    folder_dataset = datasets.ImageFolder(root, transform=depth_transform, loader=opencv_loader)

    loader = torch.utils.data.DataLoader(
        folder_dataset,
        batch_size=args.batch_size,
        num_workers=args.workers,
        sampler=None,
    )
    return DataInfo(dataloader=loader, sampler=None)
|
RAG/Knowledge_Database/languagebind_main/d_cls/precision.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from contextlib import suppress
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def get_autocast(precision):
    """Map a precision string to a context-manager factory for mixed precision.

    'amp'                      -> CUDA autocast (float16)
    'amp_bfloat16'/'amp_bf16'  -> CUDA autocast with bfloat16
    anything else              -> contextlib.suppress (a no-op context manager)
    """
    if precision == 'amp':
        return torch.cuda.amp.autocast
    if precision in ('amp_bfloat16', 'amp_bf16'):
        # bfloat16 is more numerically stable than fp16 for CLIP training
        return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)
    return suppress
|
RAG/Knowledge_Database/languagebind_main/d_cls/zero_shot.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
|
| 7 |
+
from open_clip import get_input_dtype, get_tokenizer
|
| 8 |
+
from open_clip.factory import HF_HUB_PREFIX
|
| 9 |
+
from .precision import get_autocast
|
| 10 |
+
from .zero_shot_classifier import build_zero_shot_classifier
|
| 11 |
+
from .zero_shot_metadata import CLASSNAMES, OPENAI_IMAGENET_TEMPLATES
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def accuracy(output, target, topk=(1,)):
    """Count correct predictions at each requested top-k cutoff.

    Args:
        output: (batch, num_classes) logits.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of floats — for each k, the number of samples whose true class
        appears among the model's top-k predictions (counts, not ratios).
    """
    k_max = max(topk)
    top_pred = output.topk(k_max, 1, True, True)[1].t()
    hits = top_pred.eq(target.view(1, -1).expand_as(top_pred))
    return [float(hits[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy()) for k in topk]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def run(model, classifier, dataloader, args):
    """Score a zero-shot classifier over a dataloader.

    Encodes each image batch, projects the features onto the text classifier,
    and accumulates top-1/top-5 hit counts.

    Args:
        model: CLIP-style model returning image features.
        classifier: (embed_dim, num_classes) text-embedding weight matrix.
        dataloader: yields (images, target) batches.
        args: reads ``precision``, ``device`` and ``batch_size``.

    Returns:
        (top1, top5) accuracy ratios in [0, 1].
    """
    autocast = get_autocast(args.precision)
    input_dtype = get_input_dtype(args.precision)

    with torch.no_grad():
        correct1 = 0.
        correct5 = 0.
        seen = 0
        for images, target in tqdm(dataloader, unit_scale=args.batch_size):
            images = images.to(device=args.device, dtype=input_dtype)
            # insert a singleton axis at dim 2 — presumably a frame/time axis
            # expected by the video-style encoder (TODO confirm input layout)
            images = images.unsqueeze(2)
            target = target.to(args.device)

            with autocast():
                # predict
                encoded = model(image=images)
                image_features = encoded['image_features'] if isinstance(encoded, dict) else encoded[0]
                logits = 100. * image_features @ classifier

            # measure accuracy
            acc1, acc5 = accuracy(logits, target, topk=(1, 5))
            correct1 += acc1
            correct5 += acc5
            seen += images.size(0)

        return correct1 / seen, correct5 / seen
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def zero_shot_eval(model, data, epoch, args):
    """Zero-shot depth-classification evaluation entry point.

    Temporarily points ``args.val_d_cls_data`` at the single dataset present
    in ``data``, builds a text classifier from class-name prompt templates,
    and scores the model on that dataset.

    Args:
        model: CLIP-style model (possibly DDP-wrapped; unwrapped before use).
        data: mapping of dataset name -> DataInfo; must contain exactly one entry.
        epoch: current epoch; evaluation runs only on ``zeroshot_frequency``
            multiples or at the final epoch.
        args: run-configuration namespace (mutated temporarily, always restored).

    Returns:
        dict of '<dataset>-zeroshot-val-top{1,5}' metrics, or ``{}`` when skipped.
    """
    temp_val_d_cls_data = args.val_d_cls_data
    # BUG FIX: the original restored args.val_d_cls_data only on the full
    # evaluation path; every early `return {}` leaked the temporary mutation
    # back to the caller.  try/finally guarantees restoration on all paths.
    try:
        args.val_d_cls_data = list(data.keys())
        assert len(args.val_d_cls_data) == 1
        args.val_d_cls_data = args.val_d_cls_data[0]

        if args.val_d_cls_data not in data:
            return {}
        if args.zeroshot_frequency == 0:
            return {}
        if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs:
            return {}
        if args.distributed and not args.horovod:
            # unwrap DistributedDataParallel to reach encode_text / forward
            model = model.module

        logging.info(f'Starting zero-shot {args.val_d_cls_data.upper()}.')

        logging.info('Building zero-shot classifier')
        autocast = get_autocast(args.precision)
        with autocast():
            tokenizer = get_tokenizer(HF_HUB_PREFIX + args.model, cache_dir=args.cache_dir)
            # tokenizer = get_tokenizer("ViT-L-14")
            classifier = build_zero_shot_classifier(
                model,
                tokenizer=tokenizer,
                classnames=CLASSNAMES[args.val_d_cls_data],
                templates=OPENAI_IMAGENET_TEMPLATES,
                num_classes_per_batch=10,
                device=args.device,
                use_tqdm=True,
            )

        logging.info('Using classifier')
        results = {}
        if args.val_d_cls_data in data:
            top1, top5 = run(model, classifier, data[args.val_d_cls_data].dataloader, args)
            results[f'{args.val_d_cls_data}-zeroshot-val-top1'] = top1
            results[f'{args.val_d_cls_data}-zeroshot-val-top5'] = top5

        logging.info(f'Finished zero-shot {args.val_d_cls_data.upper()}.')

        return results
    finally:
        args.val_d_cls_data = temp_val_d_cls_data
|
RAG/Knowledge_Database/languagebind_main/d_cls/zero_shot_classifier.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from functools import partial
|
| 2 |
+
from itertools import islice
|
| 3 |
+
from typing import Callable, List, Optional, Sequence, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def batched(iterable, n):
    """Yield successive lists of up to *n* items from *iterable*.

    The final batch may be shorter than *n*.
    NOTE based on more-itertools impl, to be replaced by python 3.12 itertools.batched impl
    """
    source = iter(iterable)
    while chunk := list(islice(source, n)):
        yield chunk
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def build_zero_shot_classifier(
        model,
        tokenizer,
        classnames: Sequence[str],
        templates: Sequence[Union[Callable, str]],
        num_classes_per_batch: Optional[int] = 10,
        device: Union[str, torch.device] = 'cpu',
        use_tqdm: bool = False,
):
    """Build zero-shot classifier weights by embedding class names in batches.

    Each class name is expanded through every prompt template, the resulting
    text embeddings are averaged per class and re-normalised, and the class
    vectors are stacked column-wise into the classifier weight matrix.

    Args:
        model: CLIP model instance
        tokenizer: CLIP tokenizer instance
        classnames: A sequence of class (label) names
        templates: A sequence of callables or format() friendly strings to produce templates per class name
        num_classes_per_batch: The number of classes to batch together in each forward, all if None
        device: Device to use.
        use_tqdm: Enable TQDM progress bar.
    """
    assert isinstance(templates, Sequence) and len(templates) > 0
    assert isinstance(classnames, Sequence) and len(classnames) > 0
    format_style = isinstance(templates[0], str)
    n_templates = len(templates)
    n_classes = len(classnames)

    if use_tqdm:
        import tqdm
        total_batches = 1 if num_classes_per_batch is None else ((n_classes - 1) // num_classes_per_batch + 1)
        progress = partial(tqdm.tqdm, total=total_batches, unit_scale=num_classes_per_batch)
    else:
        progress = iter

    def _embed_classes(names):
        # one prompt per (class, template) pair, class-major order
        prompts = [
            tpl.format(name) if format_style else tpl(name)
            for name in names
            for tpl in templates
        ]
        input_ids, attention_mask = tokenizer(prompts)
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        embeds = F.normalize(model.encode_text(input_ids, attention_mask), dim=-1)
        # average the template embeddings per class, then re-normalise
        embeds = embeds.reshape(len(names), n_templates, -1).mean(dim=1)
        embeds = embeds / embeds.norm(dim=1, keepdim=True)
        return embeds.T

    with torch.no_grad():
        if num_classes_per_batch:
            columns = [_embed_classes(chunk) for chunk in progress(batched(classnames, num_classes_per_batch))]
            zeroshot_weights = torch.cat(columns, dim=1)
        else:
            zeroshot_weights = _embed_classes(classnames)
    return zeroshot_weights
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def build_zero_shot_classifier_legacy(
        model,
        tokenizer,
        classnames: Sequence[str],
        templates: Sequence[Union[Callable, str]],
        device: Union[str, torch.device] = 'cpu',
        use_tqdm: bool = False,
):
    """Build zero-shot classifier weights one class name at a time.

    Memory-light alternative to the batched builder: each class's prompts are
    embedded, averaged, re-normalised, and stacked column-wise.

    Args:
        model: CLIP model instance
        tokenizer: CLIP tokenizer instance
        classnames: A sequence of class (label) names
        templates: A sequence of callables or format() friendly strings to produce templates per class name
        device: Device to use.
        use_tqdm: Enable TQDM progress bar.
    """
    assert isinstance(templates, Sequence) and len(templates) > 0
    assert isinstance(classnames, Sequence) and len(classnames) > 0

    if use_tqdm:
        import tqdm
        progress = tqdm.tqdm
    else:
        progress = iter

    format_style = isinstance(templates[0], str)

    with torch.no_grad():
        columns = []
        for name in progress(classnames):
            prompts = [tpl.format(name) if format_style else tpl(name) for tpl in templates]
            tokens = tokenizer(prompts).to(device)  # tokenize
            embeds = model.encode_text(tokens)
            # mean over templates, then re-normalise to unit length
            class_vec = F.normalize(embeds, dim=-1).mean(dim=0)
            class_vec = class_vec / class_vec.norm()
            columns.append(class_vec)
        zeroshot_weights = torch.stack(columns, dim=1).to(device)

    return zeroshot_weights
|
| 111 |
+
|
RAG/Knowledge_Database/languagebind_main/d_cls/zero_shot_metadata.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
import pandas as pd
|
| 4 |
+
|
| 5 |
+
# Depth-modality variant of OpenAI's 80 CLIP ImageNet prompt templates:
# every occurrence of "photo" is replaced with "depth photo" so the text
# prompts match depth-map inputs.
OPENAI_IMAGENET_TEMPLATES = (
    lambda c: f'a bad depth photo of a {c}.',
    lambda c: f'a depth photo of many {c}.',
    lambda c: f'a sculpture of a {c}.',
    lambda c: f'a depth photo of the hard to see {c}.',
    lambda c: f'a low resolution depth photo of the {c}.',
    lambda c: f'a rendering of a {c}.',
    lambda c: f'graffiti of a {c}.',
    lambda c: f'a bad depth photo of the {c}.',
    lambda c: f'a cropped depth photo of the {c}.',
    lambda c: f'a tattoo of a {c}.',
    lambda c: f'the embroidered {c}.',
    lambda c: f'a depth photo of a hard to see {c}.',
    lambda c: f'a bright depth photo of a {c}.',
    lambda c: f'a depth photo of a clean {c}.',
    lambda c: f'a depth photo of a dirty {c}.',
    lambda c: f'a dark depth photo of the {c}.',
    lambda c: f'a drawing of a {c}.',
    lambda c: f'a depth photo of my {c}.',
    lambda c: f'the plastic {c}.',
    lambda c: f'a depth photo of the cool {c}.',
    lambda c: f'a close-up depth photo of a {c}.',
    lambda c: f'a black and white depth photo of the {c}.',
    lambda c: f'a painting of the {c}.',
    lambda c: f'a painting of a {c}.',
    lambda c: f'a pixelated depth photo of the {c}.',
    lambda c: f'a sculpture of the {c}.',
    lambda c: f'a bright depth photo of the {c}.',
    lambda c: f'a cropped depth photo of a {c}.',
    lambda c: f'a plastic {c}.',
    lambda c: f'a depth photo of the dirty {c}.',
    lambda c: f'a jpeg corrupted depth photo of a {c}.',
    lambda c: f'a blurry depth photo of the {c}.',
    lambda c: f'a depth photo of the {c}.',
    lambda c: f'a good depth photo of the {c}.',
    lambda c: f'a rendering of the {c}.',
    lambda c: f'a {c} in a video game.',
    lambda c: f'a depth photo of one {c}.',
    lambda c: f'a doodle of a {c}.',
    lambda c: f'a close-up depth photo of the {c}.',
    lambda c: f'a depth photo of a {c}.',
    lambda c: f'the origami {c}.',
    lambda c: f'the {c} in a video game.',
    lambda c: f'a sketch of a {c}.',
    lambda c: f'a doodle of the {c}.',
    lambda c: f'a origami {c}.',
    lambda c: f'a low resolution depth photo of a {c}.',
    lambda c: f'the toy {c}.',
    lambda c: f'a rendition of the {c}.',
    lambda c: f'a depth photo of the clean {c}.',
    lambda c: f'a depth photo of a large {c}.',
    lambda c: f'a rendition of a {c}.',
    lambda c: f'a depth photo of a nice {c}.',
    lambda c: f'a depth photo of a weird {c}.',
    lambda c: f'a blurry depth photo of a {c}.',
    lambda c: f'a cartoon {c}.',
    lambda c: f'art of a {c}.',
    lambda c: f'a sketch of the {c}.',
    lambda c: f'a embroidered {c}.',
    lambda c: f'a pixelated depth photo of a {c}.',
    lambda c: f'itap of the {c}.',
    lambda c: f'a jpeg corrupted depth photo of the {c}.',
    lambda c: f'a good depth photo of a {c}.',
    lambda c: f'a plushie {c}.',
    lambda c: f'a depth photo of the nice {c}.',
    lambda c: f'a depth photo of the small {c}.',
    lambda c: f'a depth photo of the weird {c}.',
    lambda c: f'the cartoon {c}.',
    lambda c: f'art of the {c}.',
    lambda c: f'a drawing of the {c}.',
    lambda c: f'a depth photo of the large {c}.',
    lambda c: f'a black and white depth photo of a {c}.',
    lambda c: f'the plushie {c}.',
    lambda c: f'a dark depth photo of a {c}.',
    lambda c: f'itap of a {c}.',
    lambda c: f'graffiti of the {c}.',
    lambda c: f'a toy {c}.',
    lambda c: f'itap of my {c}.',
    lambda c: f'a depth photo of a cool {c}.',
    lambda c: f'a depth photo of a small {c}.',
    lambda c: f'a tattoo of the {c}.',
)


# a much smaller subset of above prompts
# from https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb
SIMPLE_IMAGENET_TEMPLATES = (
    lambda c: f'itap of a {c}.',
    lambda c: f'a bad depth photo of the {c}.',
    lambda c: f'a origami {c}.',
    lambda c: f'a depth photo of the large {c}.',
    lambda c: f'a {c} in a video game.',
    lambda c: f'art of the {c}.',
    lambda c: f'a depth photo of the small {c}.',
)


# Intentionally empty here: ImageNet class names are not used for depth/scene
# classification; the name is kept for interface parity with open_clip metadata.
IMAGENET_CLASSNAMES = (
)


# Scene-category labels per depth-classification evaluation dataset.
CLASSNAMES = {
    'NYUV2': (
        "bathroom", "bedroom", "bookstore", "classroom", "dining room",
        "home office", "kitchen", "living room", "office", "others"
    ),
    'SUNRGBD': (
        "bathroom", "bedroom", "classroom", "computer room", "conference room", "corridor", "dining area",
        "dining room", "discussion area", "furniture store", "home office", "kitchen", "lab", "lecture theatre",
        "library", "living room", "office", "rest space", "study space"
    ),
}
|
RAG/Knowledge_Database/languagebind_main/d_cls/zeroshot_cls.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import json
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
from training.distributed import is_master
|
| 6 |
+
from .zero_shot import zero_shot_eval
|
| 7 |
+
|
| 8 |
+
try:
|
| 9 |
+
import wandb
|
| 10 |
+
except ImportError:
|
| 11 |
+
wandb = None
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def evaluate_d_cls(model, data, epoch, args, tb_writer=None):
    """Run zero-shot depth-classification evaluation and record the metrics.

    Only the master process evaluates.  Metrics are logged, optionally written
    to TensorBoard and a results.jsonl file, and pushed to wandb if enabled.

    Returns:
        dict of metric name -> value ({} on non-master ranks or when skipped).
    """
    metrics = {}
    if not is_master(args):
        return metrics
    model.eval()

    metrics.update(zero_shot_eval(model, data, epoch, args))

    if not metrics:
        return metrics

    logging.info(
        f"Eval Epoch: {epoch} "
        + "\t".join(f"{name}: {round(value, 4):.4f}" for name, value in metrics.items())
    )

    if args.save_logs:
        dataset_tag = args.val_d_cls_data[0].lower()
        for name, value in metrics.items():
            if tb_writer is not None:
                tb_writer.add_scalar(f"val/d_cls/{dataset_tag}/{name}", value, epoch)
        args.d_cls_output_dir = os.path.join(args.log_base_path, f'd_cls/{dataset_tag}')
        os.makedirs(args.d_cls_output_dir, exist_ok=True)
        with open(os.path.join(args.d_cls_output_dir, "results.jsonl"), "a+") as f:
            f.write(json.dumps(metrics))
            f.write("\n")

    if args.wandb:
        assert wandb is not None, 'Please install wandb.'
        for name, value in metrics.items():
            wandb.log({f"val/{name}": value, 'epoch': epoch})

    return metrics
|
RAG/Knowledge_Database/languagebind_main/data/base_datasets.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
import io
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
import os.path
|
| 6 |
+
import random
|
| 7 |
+
import re
|
| 8 |
+
import time
|
| 9 |
+
|
| 10 |
+
import pandas as pd
|
| 11 |
+
|
| 12 |
+
from a_cls.dataloader import make_midname_dict
|
| 13 |
+
from open_clip import get_tokenizer
|
| 14 |
+
from open_clip.factory import HF_HUB_PREFIX
|
| 15 |
+
from .process_video import load_and_transform_video, get_video_transform
|
| 16 |
+
from .process_audio import load_and_transform_audio, get_audio_transform
|
| 17 |
+
from .process_text import load_and_transform_text
|
| 18 |
+
from .process_depth import load_and_transform_depth, get_depth_transform
|
| 19 |
+
from .process_thermal import load_and_transform_thermal, get_thermal_transform
|
| 20 |
+
|
| 21 |
+
import argparse
|
| 22 |
+
from os.path import join as opj
|
| 23 |
+
from torch.utils.data import Dataset, DataLoader
|
| 24 |
+
from tqdm import tqdm
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class VAT_dataset(Dataset):
|
| 29 |
+
def __init__(self, args):
|
| 30 |
+
super().__init__()
|
| 31 |
+
self.video_decode_backend = args.video_decode_backend
|
| 32 |
+
self.num_frames = args.num_frames
|
| 33 |
+
self.text_type = args.text_type
|
| 34 |
+
self.total_text = ['raw', 'mplug', 'polish_mplug', 'sound_mplug'] + [f'ofa{i}' for i in range(8)]
|
| 35 |
+
self.weight = [0.2, 0.2, 0.2, 0.2] + [0.2 / 8] * 8
|
| 36 |
+
self.title = self.text_type == 'raw'
|
| 37 |
+
self.data_root = '/apdcephfs_cq3/share_1311970/A_Youtube'
|
| 38 |
+
if args.clip_type != 'al':
|
| 39 |
+
with open(args.train_data, 'r') as f:
|
| 40 |
+
self.id2title_folder_caps = json.load(f)
|
| 41 |
+
self.ids = list(self.id2title_folder_caps.keys())[:args.train_num_samples]
|
| 42 |
+
else:
|
| 43 |
+
self.id2path_cap, self.ids = get_audio_anno()
|
| 44 |
+
|
| 45 |
+
self.clip_type = args.clip_type
|
| 46 |
+
|
| 47 |
+
self.num_mel_bins = args.num_mel_bins
|
| 48 |
+
self.target_length = args.target_length
|
| 49 |
+
self.audio_sample_rate = args.audio_sample_rate
|
| 50 |
+
self.audio_mean = args.audio_mean
|
| 51 |
+
self.audio_std = args.audio_std
|
| 52 |
+
|
| 53 |
+
# self.audio_error_file = open('./audio_error_id.txt', 'w')
|
| 54 |
+
|
| 55 |
+
self.tokenizer = get_tokenizer(HF_HUB_PREFIX + args.model, cache_dir=args.cache_dir)
|
| 56 |
+
self.video_transform = get_video_transform(args)
|
| 57 |
+
self.audio_transform = get_audio_transform(args)
|
| 58 |
+
self.depth_transform = get_depth_transform(args)
|
| 59 |
+
self.thermal_transform = get_thermal_transform(args)
|
| 60 |
+
|
| 61 |
+
def __len__(self):
|
| 62 |
+
return len(self.ids)
|
| 63 |
+
# return self.id2title_folder_caps.shape[0]
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def __getitem__(self, idx):
|
| 67 |
+
try:
|
| 68 |
+
if self.clip_type == 'al':
|
| 69 |
+
matched_modality, input_ids, attention_mask = self.get_audio_text(idx)
|
| 70 |
+
return matched_modality, input_ids, attention_mask
|
| 71 |
+
else:
|
| 72 |
+
id = self.ids[idx]
|
| 73 |
+
folder = self.id2title_folder_caps[id]['folder']
|
| 74 |
+
text_output, ofa_number = self.get_text(id)
|
| 75 |
+
input_ids, attention_mask = text_output['input_ids'], text_output['attention_mask']
|
| 76 |
+
if self.clip_type == 'vl' or self.clip_type == 'vl_new':
|
| 77 |
+
matched_modality = self.get_video(id, folder)
|
| 78 |
+
# elif self.clip_type == 'al':
|
| 79 |
+
# matched_modality = self.get_audio(id, folder)
|
| 80 |
+
elif self.clip_type == 'dl':
|
| 81 |
+
matched_modality = self.get_depth(id, folder, ofa_number)
|
| 82 |
+
elif self.clip_type == 'tl':
|
| 83 |
+
matched_modality = self.get_thermal(id, folder, ofa_number)
|
| 84 |
+
return matched_modality['pixel_values'], input_ids, attention_mask
|
| 85 |
+
except Exception as error_msg:
|
| 86 |
+
logging.info(f"Failed at {idx} with \"{error_msg}\"")
|
| 87 |
+
return self.__getitem__(random.randint(0, self.__len__()-1))
|
| 88 |
+
|
| 89 |
+
def get_video(self, id, folder):
|
| 90 |
+
# video_path = opj(self.data_root, folder, f'{id}.mp4')
|
| 91 |
+
resize_folder = 'new_download_resize256_skip15' if folder.startswith('new_') else f'{folder}_resize256_skip15'
|
| 92 |
+
video_path = opj(self.data_root, resize_folder, f'{id}.mp4')
|
| 93 |
+
video = load_and_transform_video(video_path, self.video_transform,
|
| 94 |
+
video_decode_backend=self.video_decode_backend, num_frames=self.num_frames)
|
| 95 |
+
return video
|
| 96 |
+
|
| 97 |
+
def get_audio_text(self, idx):
    """Load one audio clip plus its tokenized caption.

    Returns (audio_data, input_ids, attention_mask) with the token tensors
    squeezed to drop the batch dimension added by the tokenizer.
    """
    entry = self.id2path_cap[self.ids[idx]]
    audio_data = load_and_transform_audio(entry['path'], self.audio_transform)

    caption = entry['caption']
    if isinstance(caption, list):
        # multiple captions: sample one at random, otherwise take the only one
        if isinstance(caption[0], str) and len(caption) > 1:
            caption = random.choice(caption)
        else:
            caption = caption[0]

    input_ids, attention_mask = self.tokenizer(caption)
    return audio_data, input_ids.squeeze(), attention_mask.squeeze()
# def get_audio(self, idx):
|
| 115 |
+
'''
|
| 116 |
+
audio_path = opj(self.data_root, folder, f'{id}.mp3')
|
| 117 |
+
if os.path.exists(audio_path):
|
| 118 |
+
pass
|
| 119 |
+
else:
|
| 120 |
+
audio_path = audio_path[:-4] + '.m4a'
|
| 121 |
+
if os.path.exists(audio_path):
|
| 122 |
+
pass
|
| 123 |
+
else:
|
| 124 |
+
audio_path = audio_path[:-4] + '.wav'
|
| 125 |
+
if not os.path.exists(audio_path):
|
| 126 |
+
# self.audio_error_file.write(audio_path[:-4] + '\n')
|
| 127 |
+
raise FileNotFoundError(f'Not found audio file at \'{audio_path[:-4]}\' with .mp3 .m4a .wav')
|
| 128 |
+
# AudioSegment.from_file(audio_path).export(audio_path[:-4] + '.mp3', format='mp3')
|
| 129 |
+
# audio_path = opj(self.data_root, folder, f'{id}.mp3')
|
| 130 |
+
audio = load_and_transform_audio(audio_path, self.audio_transform)
|
| 131 |
+
'''
|
| 132 |
+
|
| 133 |
+
# audio_path = opj(self.data_root, folder+'_ffmpeg_mp3', f'{id}.mp3')
|
| 134 |
+
# audio = load_and_transform_audio(audio_path, self.audio_transform)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
'''
|
| 138 |
+
audiocap_id = self.meta['uniq_id'][idx]
|
| 139 |
+
audio_path = f'/apdcephfs_cq3/share_1311970/downstream_datasets/Audio/audiocaps/audio/train/{audiocap_id}.flac'
|
| 140 |
+
audio_data = load_and_transform_audio(audio_path, self.audio_transform)
|
| 141 |
+
|
| 142 |
+
caption = self.meta['text'][idx]
|
| 143 |
+
input_ids, attention_mask = self.tokenizer(caption)
|
| 144 |
+
return audio_data, input_ids.squeeze(), attention_mask.squeeze()
|
| 145 |
+
'''
|
| 146 |
+
|
| 147 |
+
'''
|
| 148 |
+
path_cap = self.id2path_cap[self.ids[idx]]
|
| 149 |
+
audio_path = f"/remote-home/freesound/{path_cap['path']}"
|
| 150 |
+
audio_data = load_and_transform_audio(audio_path, self.audio_transform)
|
| 151 |
+
|
| 152 |
+
caption = path_cap['caption']
|
| 153 |
+
input_ids, attention_mask = self.tokenizer(caption)
|
| 154 |
+
'''
|
| 155 |
+
|
| 156 |
+
# return audio
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def get_text(self, id):
    """Tokenize one caption for sample `id`.

    Returns (tokenizer_output, ofa_number). ofa_number is the index of the
    chosen OFA caption (used to pick the matching depth/thermal frame) or
    None when a non-OFA caption source was used.
    """
    if self.text_type != 'mix':
        raw = self.id2title_folder_caps[id][self.text_type]
        return load_and_transform_text(raw, self.tokenizer, title=self.title), None

    # 'mix': sample one caption source according to the configured weights
    source = random.choices(self.total_text, self.weight)[0]
    ofa_number = None
    if source.startswith('ofa'):
        # the trailing digit of the source name selects the OFA caption slot
        ofa_number = int(source[-1])
        raw = self.id2title_folder_caps[id]['ofa'][ofa_number]
    else:
        raw = self.id2title_folder_caps[id][source]
    return load_and_transform_text(raw, self.tokenizer, title=source == 'raw'), ofa_number
def get_depth(self, id, folder, ofa_number):
    """Load one depth map for sample `id`.

    Uses the frame matching the sampled OFA caption when available,
    otherwise a random frame index in [0, 7].
    """
    frame_idx = random.randint(0, 7) if ofa_number is None else ofa_number
    # random_id = 3
    depth_dir = opj(self.data_root, folder, f'{id}_depth_f8glpn_folder')
    return load_and_transform_depth(
        os.path.join(depth_dir, f'{frame_idx}.png'), self.depth_transform
    )
def get_thermal(self, id, folder, ofa_number):
    """Load one thermal image for sample `id`.

    Mirrors get_depth: the OFA caption index picks the frame when present,
    otherwise a random frame index in [0, 7] is used.
    """
    frame_idx = random.randint(0, 7) if ofa_number is None else ofa_number
    # random_id = 3
    thermal_dir = opj(self.data_root, folder, f'{id}_thermal_folder')
    return load_and_transform_thermal(
        os.path.join(thermal_dir, f'{frame_idx}.jpg'), self.thermal_transform
    )
if __name__ == '__main__':
    # Smoke test: build the VAT dataset with hard-coded args and iterate it once.
    parser = argparse.ArgumentParser('Pre-training', add_help=False)
    # fix: a frame count is integral — was type=float
    parser.add_argument('--num_frames', default=8, type=int, help='')
    parser.add_argument('--workers', default=10, type=int, help='')
    args = parser.parse_args()

    # fix: raw string — '\O' in 'D:\Omni-modal-hf' is an invalid escape sequence
    args.cache_dir = r'D:\Omni-modal-hf'
    args.num_frames = 8
    args.clip_type = 'vl'
    args.num_mel_bins = 128
    args.target_length = 1024
    args.audio_sample_rate = 16000
    args.audio_mean = 1
    args.audio_std = 1
    args.rank = 0
    args.batch_size = 16

    train_dataset = VAT_dataset(args)
    load = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.workers)

    for samples in tqdm(load):
        matched_modality, input_ids, attention_mask = samples
        # print(video.shape, text.shape)
RAG/Knowledge_Database/languagebind_main/data/bpe_simple_vocab_16e6.txt.gz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
|
| 3 |
+
size 1356917
|
RAG/Knowledge_Database/languagebind_main/data/build_datasets.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import time
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from multiprocessing import Value
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch.utils.data import DataLoader
|
| 8 |
+
from torch.utils.data.distributed import DistributedSampler
|
| 9 |
+
|
| 10 |
+
from data.base_datasets import VAT_dataset
|
| 11 |
+
from data.new_loadvat import get_wds_dataset
|
| 12 |
+
from open_clip import get_tokenizer
|
| 13 |
+
from open_clip.factory import HF_HUB_PREFIX
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class SharedEpoch:
    """Epoch counter backed by a multiprocessing Value so forked dataloader
    worker processes observe epoch bumps made by the main process."""

    def __init__(self, epoch: int = 0):
        # 'i': C signed int, shared across processes
        self.shared_epoch = Value('i', epoch)

    def set_value(self, epoch):
        self.shared_epoch.value = epoch

    def get_value(self):
        return self.shared_epoch.value
@dataclass
class DataInfo:
    """Bundle of a dataloader with its optional sampler and shared epoch counter."""
    dataloader: DataLoader
    sampler: DistributedSampler = None
    shared_epoch: SharedEpoch = None

    def set_epoch(self, epoch):
        """Propagate the new epoch to the shared counter and the sampler."""
        if self.shared_epoch is not None:
            self.shared_epoch.set_value(epoch)
        # isinstance already rejects None, so no separate None check is needed
        if isinstance(self.sampler, DistributedSampler):
            self.sampler.set_epoch(epoch)
def get_VAT_dataset(args):
    """Build the VAT training DataLoader (distributed-aware) wrapped in DataInfo."""
    dataset = VAT_dataset(args)
    sampler = DistributedSampler(dataset) if args.distributed else None

    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        # prefetch_factor=2,
        # persistent_workers=True,
        shuffle=sampler is None,  # the sampler shuffles when distributed
        num_workers=args.workers,
        pin_memory=True,
        sampler=sampler,
        drop_last=True,
    )
    # attach bookkeeping attributes expected by the training loop
    dataloader.num_samples = len(dataset)
    dataloader.num_batches = len(dataloader)

    return DataInfo(dataloader, sampler)
def get_data(args, epoch=0):
    """Assemble the dict of train/eval dataloaders keyed by task name.

    Mutates `args` heavily while building each eval loader, restoring the
    touched fields afterwards. Returns e.g. {'vl_pt': ..., 'vl_ret': [...],
    'a_cls': [...], ...}.

    Fix: the restore lines previously wrote `args.audio_mean, args.audio_mean = ...`
    (same target twice), so args.audio_std was never restored after eval-loader
    construction. All three occurrences now restore audio_std correctly.
    """
    data = {}

    if args.do_train:
        if args.train_data.endswith(".json"):
            data[f"{args.clip_type}_pt"] = get_VAT_dataset(args)
        elif args.train_data.endswith(".tar"):
            data[f"{args.clip_type}_pt"] = get_wds_dataset(args, is_train=True, epoch=epoch)
        else:
            raise NameError

    if args.do_eval:
        temp_batch_size = args.batch_size
        args.batch_size = 8 if args.val_vl_ret_data else 16
        data_root = "/apdcephfs_cq3/share_1311970/downstream_datasets/VideoTextRetrieval/vtRetdata"
        if args.val_vl_ret_data:
            data["vl_ret"] = []
            for val_vl_ret_data in args.val_vl_ret_data:
                if val_vl_ret_data == "msrvtt":
                    args.train_csv = os.path.join(f'{data_root}/MSRVTT/MSRVTT_train.9k.csv')
                    args.val_csv = os.path.join(f'{data_root}/MSRVTT/MSRVTT_JSFUSION_test.csv')
                    args.data_path = os.path.join(f'{data_root}/MSRVTT/MSRVTT_data.json')
                    args.features_path = os.path.join(f'{data_root}/MSRVTT/MSRVTT_Videos')
                elif val_vl_ret_data == "msvd":
                    args.data_path = os.path.join(f'{data_root}/MSVD')
                    args.features_path = os.path.join(f'{data_root}/MSVD/MSVD_Videos')
                elif val_vl_ret_data == "activity":
                    args.data_path = os.path.join(f'{data_root}/ActivityNet')
                    args.features_path = os.path.join(f'{data_root}/ActivityNet/Videos/Activity_Videos')
                elif val_vl_ret_data == "didemo":
                    args.data_path = os.path.join(f'{data_root}/Didemo')
                    args.features_path = os.path.join(f'{data_root}/Didemo/videos')
                else:
                    raise NameError

                args.batch_size_val = args.batch_size if args.batch_size_val == 0 else args.batch_size_val
                args.max_frames = args.num_frames
                args.num_thread_reader = args.workers
                args.slice_framepos = 2  # "0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly."

                from vl_ret.data_dataloaders import DATALOADER_DICT

                tokenizer = get_tokenizer(HF_HUB_PREFIX + args.model, cache_dir=args.cache_dir)
                test_dataloader, test_length = None, 0
                if DATALOADER_DICT[val_vl_ret_data]["test"] is not None:
                    test_dataloader, test_length = DATALOADER_DICT[val_vl_ret_data]["test"](args, tokenizer)

                if DATALOADER_DICT[val_vl_ret_data]["val"] is not None:
                    val_dataloader, val_length = DATALOADER_DICT[val_vl_ret_data]["val"](args, tokenizer, subset="val")
                else:
                    val_dataloader, val_length = test_dataloader, test_length
                ## report validation results if the ["test"] is None
                if test_dataloader is None:
                    test_dataloader, test_length = val_dataloader, val_length

                data["vl_ret"].append({val_vl_ret_data: test_dataloader})

        if args.val_v_cls_data:
            data["v_cls"] = []
            temp_val_v_cls_data = args.val_v_cls_data
            for val_v_cls_data in temp_val_v_cls_data:
                from v_cls import get_video_cls_dataloader
                args.val_v_cls_data = val_v_cls_data
                if args.val_v_cls_data == 'Kinetics-400':
                    args.video_data_path = "/apdcephfs_cq3/share_1311970/downstream_datasets/VideoCls/new_k400/Kinetics-400/raw/Kinetics-400"
                    args.nb_classes = 400
                elif args.val_v_cls_data == 'Kinetics-600':
                    args.video_data_path = "/apdcephfs_cq3/share_1311970/downstream_datasets/VideoCls/new_k600/Kinetics600/raw/Kinetics600"
                    args.nb_classes = 600
                args.data_root = args.video_data_path
                args.data_set = val_v_cls_data
                args.dist_eval = True
                args.sampling_rate = 8
                args.num_sample = 1
                args.test_num_segment = 5
                args.test_num_crop = 3
                args.num_workers = args.workers
                data['v_cls'].append({val_v_cls_data: get_video_cls_dataloader(args)})
            args.val_v_cls_data = temp_val_v_cls_data

        if args.val_a_cls_data:
            temp_audio_mean, temp_audio_std = args.audio_mean, args.audio_std
            args.audio_mean, args.audio_std = -4.2677393, 4.5689974
            data["a_cls"] = []
            data_root = "/apdcephfs_cq3/share_1311970/downstream_datasets/Audio"
            temp_val_a_cls_data = args.val_a_cls_data
            for val_a_cls_data in temp_val_a_cls_data:
                from a_cls.datasets import get_audio_dataset
                args.val_a_cls_data = val_a_cls_data
                args.audio_data_path = os.path.join(data_root, f'{val_a_cls_data.lower()}/test')
                data['a_cls'].append({val_a_cls_data: get_audio_dataset(args)})
            args.val_a_cls_data = temp_val_a_cls_data
            # fix: restore audio_std (was assigning audio_mean twice)
            args.audio_mean, args.audio_std = temp_audio_mean, temp_audio_std

        if args.val_al_ret_data:
            temp_audio_mean, temp_audio_std = args.audio_mean, args.audio_std
            args.audio_mean, args.audio_std = -4.2677393, 4.5689974

            data["al_ret"] = []
            data_root = "/apdcephfs_cq3/share_1311970/downstream_datasets/Audio"
            temp_val_al_ret_data = args.val_al_ret_data
            for val_al_ret_data in temp_val_al_ret_data:
                from al_ret.datasets import get_audio_dataset
                args.val_al_ret_data = val_al_ret_data
                if val_al_ret_data.lower() != 'msrvtt':
                    args.audio_data_path = os.path.join(data_root, val_al_ret_data.lower())
                    data['al_ret'].append({val_al_ret_data: get_audio_dataset(args)})
                elif val_al_ret_data.lower() == 'msrvtt':
                    args.train_csv = os.path.join(f'/apdcephfs_cq3/share_1311970/downstream_datasets/VideoTextRetrieval/vtRetdata/MSRVTT/MSRVTT_train.9k.csv')
                    args.val_csv = os.path.join(f'/apdcephfs_cq3/share_1311970/downstream_datasets/VideoTextRetrieval/Audio/MSRVTT/MSRVTT_AUDIO_test.csv')
                    args.data_path = os.path.join(f'/apdcephfs_cq3/share_1311970/downstream_datasets/VideoTextRetrieval/vtRetdata/MSRVTT/MSRVTT_data.json')
                    args.features_path = os.path.join(f'/apdcephfs_cq3/share_1311970/downstream_datasets/VideoTextRetrieval/Audio/MSRVTT/videos/all')

                    args.num_thread_reader = args.workers
                    from al_ret.data_dataloaders import DATALOADER_DICT
                    args.batch_size_val = args.batch_size if args.batch_size_val == 0 else args.batch_size_val

                    tokenizer = get_tokenizer(HF_HUB_PREFIX + args.model, cache_dir=args.cache_dir)
                    test_dataloader, test_length = None, 0
                    if DATALOADER_DICT[val_al_ret_data.lower()]["test"] is not None:
                        test_dataloader, test_length = DATALOADER_DICT[val_al_ret_data.lower()]["test"](args, tokenizer)

                    if DATALOADER_DICT[val_al_ret_data.lower()]["val"] is not None:
                        val_dataloader, val_length = DATALOADER_DICT[val_al_ret_data.lower()]["val"](args, tokenizer, subset="val")
                    else:
                        val_dataloader, val_length = test_dataloader, test_length
                    ## report validation results if the ["test"] is None
                    if test_dataloader is None:
                        test_dataloader, test_length = val_dataloader, val_length
                    data['al_ret'].append({val_al_ret_data: test_dataloader})

            args.val_al_ret_data = temp_val_al_ret_data
            # fix: restore audio_std (was assigning audio_mean twice)
            args.audio_mean, args.audio_std = temp_audio_mean, temp_audio_std

        # NOTE(review): this block duplicates the a_cls block above verbatim;
        # preserved for behavior compatibility — consider removing upstream.
        if args.val_a_cls_data:
            temp_audio_mean, temp_audio_std = args.audio_mean, args.audio_std
            args.audio_mean, args.audio_std = -4.2677393, 4.5689974
            data["a_cls"] = []
            data_root = "/apdcephfs_cq3/share_1311970/downstream_datasets/Audio"
            temp_val_a_cls_data = args.val_a_cls_data
            for val_a_cls_data in temp_val_a_cls_data:
                from a_cls.datasets import get_audio_dataset
                args.val_a_cls_data = val_a_cls_data
                args.audio_data_path = os.path.join(data_root, f'{val_a_cls_data.lower()}/test')
                data['a_cls'].append({val_a_cls_data: get_audio_dataset(args)})
            args.val_a_cls_data = temp_val_a_cls_data
            # fix: restore audio_std (was assigning audio_mean twice)
            args.audio_mean, args.audio_std = temp_audio_mean, temp_audio_std

        if args.imagenet_val is not None:
            from i_cls.datasets import get_imagenet
            data['i_cls'] = {}
            data['i_cls']["imagenet-val"] = get_imagenet(args, "val")
        if args.imagenet_v2 is not None:
            from i_cls.datasets import get_imagenet
            if data.get('i_cls', None) is None:
                data['i_cls'] = {}
            data['i_cls']["imagenet-v2"] = get_imagenet(args, "v2")

        if args.val_d_cls_data:
            data["d_cls"] = []
            data_root = "/apdcephfs_cq3/share_1311970/downstream_datasets/Depth"
            temp_val_d_cls_data = args.val_d_cls_data
            for val_d_cls_data in temp_val_d_cls_data:
                from d_cls.datasets import get_depth_dataset
                args.val_d_cls_data = val_d_cls_data
                args.depth_data_path = os.path.join(data_root, f'{val_d_cls_data.lower()}/data/val')
                data['d_cls'].append({val_d_cls_data: get_depth_dataset(args)})
            args.val_d_cls_data = temp_val_d_cls_data

        if args.val_t_cls_data:
            data["t_cls"] = []
            data_root = "/apdcephfs_cq3/share_1311970/downstream_datasets/Thermal"
            temp_val_t_cls_data = args.val_t_cls_data
            for val_t_cls_data in temp_val_t_cls_data:
                from t_cls.datasets import get_thermal_dataset
                args.val_t_cls_data = val_t_cls_data
                args.thermal_data_path = os.path.join(data_root, f'{val_t_cls_data.lower()}/val')
                data['t_cls'].append({val_t_cls_data: get_thermal_dataset(args)})
            args.val_t_cls_data = temp_val_t_cls_data

        args.batch_size = temp_batch_size

    return data
RAG/Knowledge_Database/languagebind_main/data/new_loadvat.py
ADDED
|
@@ -0,0 +1,498 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ast
|
| 2 |
+
import io
|
| 3 |
+
import json
|
| 4 |
+
import logging
|
| 5 |
+
import math
|
| 6 |
+
import os
|
| 7 |
+
import random
|
| 8 |
+
import sys
|
| 9 |
+
import braceexpand
|
| 10 |
+
from dataclasses import dataclass
|
| 11 |
+
from multiprocessing import Value
|
| 12 |
+
|
| 13 |
+
import numpy.lib.format
|
| 14 |
+
import numpy as np
|
| 15 |
+
import pandas as pd
|
| 16 |
+
import torch
|
| 17 |
+
import torchvision.datasets as datasets
|
| 18 |
+
import webdataset as wds
|
| 19 |
+
from PIL import Image
|
| 20 |
+
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler, IterableDataset, get_worker_info
|
| 21 |
+
from torch.utils.data.distributed import DistributedSampler
|
| 22 |
+
from torchvision.transforms import ToTensor
|
| 23 |
+
from tqdm import tqdm
|
| 24 |
+
from webdataset.filters import _shuffle
|
| 25 |
+
from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample
|
| 26 |
+
|
| 27 |
+
from open_clip import get_tokenizer
|
| 28 |
+
from open_clip.factory import HF_HUB_PREFIX
|
| 29 |
+
from training.params import parse_args
|
| 30 |
+
from data.process_text import load_and_transform_text
|
| 31 |
+
from data.process_video import get_video_transform
|
| 32 |
+
from data.process_audio import get_audio_transform
|
| 33 |
+
from data.process_depth import get_depth_transform
|
| 34 |
+
from data.process_thermal import get_thermal_transform
|
| 35 |
+
import pdb
|
| 36 |
+
try:
|
| 37 |
+
import horovod.torch as hvd
|
| 38 |
+
except ImportError:
|
| 39 |
+
hvd = None
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class SharedEpoch:
    """Epoch counter backed by a multiprocessing Value so forked dataloader
    worker processes observe epoch bumps made by the main process."""

    def __init__(self, epoch: int = 0):
        # 'i': C signed int, shared across processes
        self.shared_epoch = Value('i', epoch)

    def set_value(self, epoch):
        self.shared_epoch.value = epoch

    def get_value(self):
        return self.shared_epoch.value
+
@dataclass
class DataInfo:
    """Bundle of a dataloader with its optional sampler and shared epoch counter."""
    dataloader: DataLoader
    sampler: DistributedSampler = None
    shared_epoch: SharedEpoch = None

    def set_epoch(self, epoch):
        """Propagate the new epoch to the shared counter and the sampler."""
        if self.shared_epoch is not None:
            self.shared_epoch.set_value(epoch)
        # isinstance already rejects None, so no separate None check is needed
        if isinstance(self.sampler, DistributedSampler):
            self.sampler.set_epoch(epoch)
def expand_urls(urls, weights=None):
    """Expand a shard spec into (url_list, weight_list_or_None).

    Without weights, delegates brace expansion to webdataset. With weights,
    a '::'-joined string spec is split and brace-expanded per component, each
    expanded shard inheriting its component's (float) weight; a non-string
    spec is assumed to already be a concrete url list.
    """
    if weights is None:
        return wds.shardlists.expand_urls(urls), None

    if not isinstance(urls, str):
        # already a concrete list of urls; pass weights through untouched
        return list(urls), weights

    urllist = urls.split("::")
    weights = weights.split('::')
    assert len(weights) == len(urllist), \
        f"Expected the number of data components ({len(urllist)}) and weights({len(weights)}) to match."
    all_urls, all_weights = [], []
    for url, weight in zip(urllist, [float(w) for w in weights]):
        expanded = list(braceexpand.braceexpand(url))
        all_urls.extend(expanded)
        all_weights.extend([weight] * len(expanded))
    return all_urls, all_weights
def get_dataset_size(shards):
    """Return (total_size, num_shards) for a shard spec.

    Looks for a 'sizes.json' (per-shard sample counts) or a '__len__' file
    next to the shards; total_size is None when neither exists.

    Fix: the original left file handles open (`json.load(open(...))`,
    `open(...).read()`); both reads now use context managers.
    """
    shards_list, _ = expand_urls(shards)
    dir_path = os.path.dirname(shards_list[0])
    sizes_filename = os.path.join(dir_path, 'sizes.json')
    len_filename = os.path.join(dir_path, '__len__')
    if os.path.exists(sizes_filename):
        with open(sizes_filename, 'r') as f:
            sizes = json.load(f)
        total_size = sum(int(sizes[os.path.basename(shard)]) for shard in shards_list)
    elif os.path.exists(len_filename):
        # FIXME this used to be eval(open(...)) but that seemed rather unsafe
        with open(len_filename, 'r') as f:
            total_size = ast.literal_eval(f.read())
    else:
        total_size = None  # num samples undefined
        # some common dataset sizes (at time of authors last download)
        # CC3M (train): 2905954
        # CC12M: 10968539
        # LAION-400M: 407332084
        # LAION-2B (english): 2170337258
    num_shards = len(shards_list)
    return total_size, num_shards
def count_samples(dataloader):
    """Exhaust a (images, texts) dataloader once; return (n_elements, n_batches)."""
    os.environ["WDS_EPOCH"] = "0"
    n_elements = 0
    n_batches = 0
    for images, texts in dataloader:
        assert len(images) == len(texts)
        n_batches += 1
        n_elements += len(images)
    return n_elements, n_batches
def filter_no_caption_or_no_image(sample):
    """True iff the wds sample carries every required caption and image key."""
    required = (
        'raw.txt', 'mplug.txt', 'polish_mplug.txt', 'ofa3.txt',  # captions
        'frm7.jpg', 'tml0.jpg', 'dep0.npy',                      # visual modalities
    )
    return all(key in sample for key in required)
def log_and_continue(exn):
    """Call in an exception handler to ignore any exception, issue a warning, and continue."""
    logging.warning(f'Handling webdataset error ({exn!r}). Ignoring.')
    return True  # truthy => webdataset pipeline keeps going
def group_by_keys_nothrow(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None):
    """Return function over iterator that groups key, value pairs into samples.

    :param keys: function that splits the key into key and extension (base_plus_ext)
    :param lcase: convert suffixes to lower case (Default value = True)
    """
    current_sample = None
    for filesample in data:
        assert isinstance(filesample, dict)
        fname, value = filesample["fname"], filesample["data"]
        prefix, suffix = keys(fname)
        if prefix is None:
            # file name did not split into (base, ext): skip it
            continue
        if lcase:
            suffix = suffix.lower()
        # FIXME webdataset version throws if suffix in current_sample, but we have a potential for
        # this happening in the current LAION400m dataset if a tar ends with same prefix as the next
        # begins, rare, but can happen since prefix aren't unique across tar files in that dataset
        if current_sample is None or prefix != current_sample["__key__"] or suffix in current_sample:
            # new key (or duplicate suffix): flush the finished sample and start a new one
            if valid_sample(current_sample):
                yield current_sample
            current_sample = dict(__key__=prefix, __url__=filesample["__url__"])
        if suffixes is None or suffix in suffixes:
            current_sample[suffix] = value
    # flush the trailing sample at end of stream
    if valid_sample(current_sample):
        yield current_sample
def tarfile_to_samples_nothrow(src, handler=log_and_continue):
    """Expand tar shards into grouped samples without raising on bad entries.

    NOTE: re-implementation of the webdataset pipeline using the
    non-throwing group_by_keys variant above.
    """
    streams = url_opener(src, handler=handler)
    expanded = tar_file_expander(streams, handler=handler)
    return group_by_keys_nothrow(expanded, handler=handler)
def pytorch_worker_seed(increment=0):
    """Return a per-worker seed, preferring the pytorch dataloader worker seed."""
    worker_info = get_worker_info()
    if worker_info is None:
        # not inside a dataloader worker: fall back to the wds rank-based seed
        return wds.utils.pytorch_worker_seed()
    seed = worker_info.seed
    if increment:
        # space out seed increments so they can't overlap across workers in different iterations
        seed += increment * max(1, worker_info.num_workers)
    return seed
_SHARD_SHUFFLE_SIZE = 200
|
| 185 |
+
_SHARD_SHUFFLE_INITIAL = 50
|
| 186 |
+
_SAMPLE_SHUFFLE_SIZE = 500
|
| 187 |
+
_SAMPLE_SHUFFLE_INITIAL = 100
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class detshuffle2(wds.PipelineStage):
    """Buffered shuffle stage whose RNG is re-seeded each epoch.

    With seed >= 0 the shuffle order is deterministic and identical across
    all nodes/workers for a given epoch; with seed < 0 each worker shuffles
    independently using its own pytorch worker seed.
    """

    def __init__(
        self,
        bufsize=1000,
        initial=100,
        seed=0,
        epoch=-1,
    ):
        # bufsize/initial: shuffle-buffer capacity and fill level before emitting
        self.bufsize = bufsize
        self.initial = initial
        self.seed = seed
        # epoch may be an int or a SharedEpoch (shared across worker processes)
        self.epoch = epoch

    def run(self, src):
        if isinstance(self.epoch, SharedEpoch):
            epoch = self.epoch.get_value()
        else:
            # NOTE: this is epoch tracking is problematic in a multiprocess (dataloader workers or train)
            # situation as different workers may wrap at different times (or not at all).
            self.epoch += 1
            epoch = self.epoch
        rng = random.Random()
        if self.seed < 0:
            # If seed is negative, we use the worker's seed, this will be different across all nodes/workers
            seed = pytorch_worker_seed(epoch)
        else:
            # This seed to be deterministic AND the same across all nodes/workers in each epoch
            seed = self.seed + epoch
        rng.seed(seed)
        return _shuffle(src, self.bufsize, self.initial, rng)
class ResampledShards2(IterableDataset):
    """An iterable dataset yielding a list of urls."""

    def __init__(
        self,
        urls,
        weights=None,
        nshards=sys.maxsize,
        worker_seed=None,
        deterministic=False,
        epoch=-1,
    ):
        """Sample shards from the shard list with replacement.

        :param urls: a list of URLs as a Python list or brace notation string
        :param weights: optional per-url sampling weights (must match len(urls))
        :param nshards: number of shard urls yielded per iteration
        :param worker_seed: optional callable returning a per-worker base seed
        :param deterministic: reseed the RNG from the epoch so runs repeat
        :param epoch: plain int, or a SharedEpoch shared across worker processes
        """
        super().__init__()
        urls, weights = expand_urls(urls, weights)
        self.urls = urls
        self.weights = weights
        if self.weights is not None:
            assert len(self.urls) == len(self.weights), \
                f"Number of urls {len(self.urls)} and weights {len(self.weights)} should match."
        assert isinstance(self.urls[0], str)
        self.nshards = nshards
        self.rng = random.Random()
        self.worker_seed = worker_seed
        self.deterministic = deterministic
        self.epoch = epoch

    def __iter__(self):
        """Return an iterator over the shards."""
        if isinstance(self.epoch, SharedEpoch):
            epoch = self.epoch.get_value()
        else:
            # NOTE: this is epoch tracking is problematic in a multiprocess (dataloader workers or train)
            # situation as different workers may wrap at different times (or not at all).
            self.epoch += 1
            epoch = self.epoch
        if self.deterministic:
            # reset seed w/ epoch if deterministic
            if self.worker_seed is None:
                # pytorch worker seed should be deterministic due to being init by arg.seed + rank + worker id
                seed = pytorch_worker_seed(epoch)
            else:
                seed = self.worker_seed() + epoch
            self.rng.seed(seed)
        for _ in range(self.nshards):
            if self.weights is None:
                # uniform sampling with replacement
                yield dict(url=self.rng.choice(self.urls))
            else:
                # weighted sampling with replacement
                yield dict(url=self.rng.choices(self.urls, weights=self.weights, k=1)[0])
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
class Decode:
    """Decode one webdataset sample into ``(modality_tensor, input_ids, attention_mask)``.

    Which modality is extracted depends on ``args.clip_type``:
    'vl' video frames, 'al' audio (not implemented), 'dl' depth, 'tl' thermal.
    """

    def __init__(self, args=None):
        self.num_frames = args.num_frames
        self.text_type = args.text_type
        # 'polish_mplug' captions carry a fixed "In the video, " prefix that get_text strips.
        self.chatgpt = self.text_type == 'polish_mplug'
        # 'raw' text is a YouTube title + hashtags and gets extra cleaning in the tokenizer path.
        self.title = self.text_type == 'raw'
        self.clip_type = args.clip_type
        self.tokenizer = get_tokenizer(HF_HUB_PREFIX + args.model, cache_dir=args.cache_dir)
        self.video_transform = get_video_transform(args)
        self.audio_transform = get_audio_transform(args)
        self.depth_transform = get_depth_transform(args)
        self.thermal_transform = get_thermal_transform(args)

    def __call__(self, sample):
        input_ids, attention_mask = self.get_text(sample[f"{self.text_type}.txt"], chatgpt=self.chatgpt, title=self.title)
        if self.clip_type == 'vl':
            matched_modality = self.get_video([sample[f"frm{i}.jpg"] for i in range(self.num_frames)])
        elif self.clip_type == 'al':
            matched_modality = self.get_audio()
        elif self.clip_type == 'dl':
            # fix: literal keys need no f-string prefix
            matched_modality = self.get_depth(sample["dep0.npy"])
        elif self.clip_type == 'tl':
            matched_modality = self.get_thermal(sample["tml0.jpg"])
            # matched_modality = self.get_thermal(sample[f"tml{random.randint(0, 7)}.jpg"])
        else:
            # fix: include the offending value in the error
            raise ValueError(f"unknown clip_type: {self.clip_type!r}")
        return matched_modality, input_ids, attention_mask

    def get_video(self, frames):
        """Decode a list of JPEG byte strings into a transformed video tensor."""
        video_data = []
        for frame in frames:
            with io.BytesIO(frame) as stream:
                img = Image.open(stream)
                img.load()  # force decode while the stream is still open
            assert min(img.size) == 256
            result = ToTensor()(img)
            video_data.append(result)
        video_data = torch.stack(video_data, dim=1)
        # video_data torch.Size([3, 8, 455, 256])
        # video_outputs torch.Size([3, 8, 224, 224])
        video_outputs = self.video_transform(video_data)
        return video_outputs

    def get_text(self, text, chatgpt=True, title=False):
        """Decode caption bytes, strip the fixed mPLUG prefix if needed, and tokenize."""
        text = text.decode("utf-8")
        if chatgpt:
            assert text.startswith('In the video, ')
            text = text[14:]  # drop the fixed "In the video, " prefix
        tokens = load_and_transform_text(text, self.tokenizer, title=title)
        return tokens['input_ids'], tokens['attention_mask']

    def get_audio(self):
        """Audio decoding for webdataset samples is not implemented."""
        raise NotImplementedError

    def get_depth(self, depth):
        """Decode raw .npy bytes into a transformed depth tensor."""
        stream = io.BytesIO(depth)
        img = numpy.lib.format.read_array(stream)
        depth = self.depth_transform(img)
        return depth

    def get_thermal(self, thermal):
        """Decode JPEG bytes into a transformed thermal tensor."""
        with io.BytesIO(thermal) as stream:
            img = Image.open(stream)
            img.load()
        thermal = self.thermal_transform(img)
        return thermal
|
| 345 |
+
|
| 346 |
+
def get_wds_dataset(args, is_train, epoch=0, floor=False):
    """Build a webdataset pipeline + WebLoader for train or eval.

    Returns a DataInfo carrying the dataloader (annotated with num_batches /
    num_samples) and a SharedEpoch used to reseed shuffling each epoch.
    """
    input_shards = args.train_data if is_train else args.val_data
    assert input_shards is not None
    resampled = getattr(args, 'dataset_resampled', False) and is_train

    num_shards = None
    if is_train:
        if args.train_num_samples is not None:
            num_samples = args.train_num_samples
        else:
            num_samples, num_shards = get_dataset_size(input_shards)
            if not num_samples:
                raise RuntimeError(
                    'Currently, the number of dataset samples must be specified for the training dataset. '
                    'Please specify it via `--train-num-samples` if no dataset length info is present.')
    else:
        # Eval will just exhaust the iterator if the size is not specified.
        num_samples = args.val_num_samples or 0

    shared_epoch = SharedEpoch(epoch=epoch)  # create a shared epoch store to sync epoch to dataloader worker proc

    if resampled:
        # Sampling with replacement: order/seed handled inside ResampledShards2.
        pipeline = [ResampledShards2(
            input_shards,
            weights=args.train_data_upsampling_factors,
            deterministic=True,
            epoch=shared_epoch,
        )]
    else:
        assert args.train_data_upsampling_factors is None, \
            "--train_data_upsampling_factors is only supported when sampling with replacement (with --dataset-resampled)."
        pipeline = [wds.SimpleShardList(input_shards)]

    # at this point we have an iterator over all the shards
    if is_train:
        if not resampled:
            pipeline.extend([
                detshuffle2(
                    bufsize=_SHARD_SHUFFLE_SIZE,
                    initial=_SHARD_SHUFFLE_INITIAL,
                    seed=args.seed,
                    epoch=shared_epoch,
                ),
                wds.split_by_node,
                wds.split_by_worker,
            ])
        pipeline.extend([
            # at this point, we have an iterator over the shards assigned to each worker at each node
            tarfile_to_samples_nothrow,  # wds.tarfile_to_samples(handler=log_and_continue),
            wds.shuffle(
                bufsize=_SAMPLE_SHUFFLE_SIZE,
                initial=_SAMPLE_SHUFFLE_INITIAL,
            ),
        ])
    else:
        pipeline.extend([
            wds.split_by_worker,
            # at this point, we have an iterator over the shards assigned to each worker
            wds.tarfile_to_samples(handler=log_and_continue),
        ])
    pipeline.extend([
        wds.select(filter_no_caption_or_no_image),
        # wds.decode("pilrgb", handler=log_and_continue),
        # wds.rename(image="jpg;png;jpeg;webp", text="txt"),
        # wds.map_dict(image=preprocess_img, text=lambda text: tokenizer(text)[0]),
        # wds.to_tuple("image", "text"),
        wds.map(Decode(args), handler=log_and_continue),
        wds.batched(args.batch_size, partial=not is_train)
    ])

    dataset = wds.DataPipeline(*pipeline)

    if is_train:
        if not resampled:
            num_shards = num_shards or len(expand_urls(input_shards)[0])
            assert num_shards >= args.workers * args.world_size, 'number of shards must be >= total workers'
        # roll over and repeat a few samples to get same number of full batches on each node
        round_fn = math.floor if floor else math.ceil
        global_batch_size = args.batch_size * args.world_size
        num_batches = round_fn(num_samples / global_batch_size)
        num_workers = max(1, args.workers)
        num_worker_batches = round_fn(num_batches / num_workers)  # per dataloader worker
        num_batches = num_worker_batches * num_workers
        num_samples = num_batches * global_batch_size
        dataset = dataset.with_epoch(num_worker_batches)  # each worker is iterating over this
    else:
        # last batches are partial, eval is done on single (master) node
        num_batches = math.ceil(num_samples / args.batch_size)

    dataloader = wds.WebLoader(
        dataset,
        batch_size=None,
        shuffle=False,
        num_workers=args.workers,
        persistent_workers=args.workers > 0,
    )

    # FIXME not clear which approach is better, with_epoch before vs after dataloader?
    # hoping to resolve via https://github.com/webdataset/webdataset/issues/169
    # if is_train:
    #     # roll over and repeat a few samples to get same number of full batches on each node
    #     global_batch_size = args.batch_size * args.world_size
    #     num_batches = math.ceil(num_samples / global_batch_size)
    #     num_workers = max(1, args.workers)
    #     num_batches = math.ceil(num_batches / num_workers) * num_workers
    #     num_samples = num_batches * global_batch_size
    #     dataloader = dataloader.with_epoch(num_batches)
    # else:
    #     # last batches are partial, eval is done on single (master) node
    #     num_batches = math.ceil(num_samples / args.batch_size)

    # add meta-data to dataloader instance for convenience
    dataloader.num_batches = num_batches
    dataloader.num_samples = num_samples

    return DataInfo(dataloader=dataloader, shared_epoch=shared_epoch)
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
def get_data(args, epoch=0):
    """Assemble the dict of DataInfo objects used by the training loop.

    Currently only the 'train' split is built.
    """
    return {"train": get_wds_dataset(args, is_train=True, epoch=epoch)}
|
| 471 |
+
|
| 472 |
+
|
| 473 |
+
if __name__ == '__main__':
    # Manual smoke test: build the train dataloader with a hard-coded config
    # and iterate it once.
    args = parse_args(sys.argv[1:])
    args.workers = 10
    args.batch_size = 16
    args.world_size = 1
    args.num_frames = 8
    args.clip_type = 'vl'
    args.model = "laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K"
    args.train_data = '/apdcephfs_cq3/share_1311970/lb/vat2webdata/check_8frm_title_ofa_polishmplug_1tml_1dep/{00000..03020}.tar'
    args.train_num_samples = 10_000
    args.dataset_type = 'webdataset'

    data = get_data(args, epoch=0)

    data['train'].set_epoch(0)  # set epoch in process safe manner via sampler or shared_epoch
    dataloader = data['train'].dataloader
    # NOTE(review): assumes parse_args defines `accum_freq` — confirm, it is not set above.
    num_batches_per_epoch = dataloader.num_batches // args.accum_freq
    print(num_batches_per_epoch)

    for i, batch in enumerate(tqdm(dataloader)):
        images, input_ids, attention_mask = batch
        # print(images.shape, input_ids.shape, attention_mask.shape)
        # break
|
RAG/Knowledge_Database/languagebind_main/data/process_audio.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
import torchaudio
|
| 6 |
+
import torchvision
|
| 7 |
+
from torchvision.transforms import transforms
|
| 8 |
+
from torch.nn import functional as F
|
| 9 |
+
|
| 10 |
+
torchaudio.set_audio_backend("soundfile")
|
| 11 |
+
|
| 12 |
+
def torchaudio_loader(path):
    """Load an audio file, returning a ``(waveform, sample_rate)`` pair."""
    return torchaudio.load(path)
|
| 14 |
+
|
| 15 |
+
def int16_to_float32_torch(x):
    """Map int16-range samples to float32 values in roughly [-1, 1]."""
    scaled = x / 32767.0
    return scaled.type(torch.float32)
|
| 17 |
+
|
| 18 |
+
def float32_to_int16_torch(x):
    """Clamp to [-1, 1] and convert to the int16 sample range."""
    clamped = torch.clamp(x, min=-1., max=1.)
    return (clamped * 32767.).type(torch.int16)
|
| 21 |
+
|
| 22 |
+
DEFAULT_AUDIO_FRAME_SHIFT_MS = 10
|
| 23 |
+
|
| 24 |
+
class AudioTransform:
    """Convert a ``(waveform, sample_rate)`` pair into a 3-channel log-mel tensor.

    Output shape is (3, num_mel_bins, target_length): three crops of the
    spectrogram (front/middle/back) stacked along the channel axis.
    """

    def __init__(self, args):
        self.sample_rate = args.audio_sample_rate
        self.num_mel_bins = args.num_mel_bins
        self.target_length = args.target_length
        self.audio_mean = args.audio_mean
        self.audio_std = args.audio_std
        # scratch lists kept from the original statistics-collection experiments
        self.mean = []
        self.std = []
        # mean=-4.2677393
        # std=4.5689974
        # self.norm = transforms.Normalize(mean=self.audio_mean, std=self.audio_std)

    def __call__(self, audio_data_and_origin_sr):
        audio_data, origin_sr = audio_data_and_origin_sr
        if self.sample_rate != origin_sr:
            # print(audio_data.shape, origin_sr)
            audio_data = torchaudio.functional.resample(audio_data, orig_freq=origin_sr, new_freq=self.sample_rate)
        waveform_melspec = self.waveform2melspec(audio_data)
        return waveform_melspec

    def waveform2melspec(self, audio_data):
        """Compute the mel spectrogram and crop/pad it to target_length frames."""
        mel = self.get_mel(audio_data)
        if mel.shape[0] > self.target_length:
            # split to three parts
            chunk_frames = self.target_length
            total_frames = mel.shape[0]
            ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
            if len(ranges[1]) == 0:  # if the audio is too short, we just use the first chunk
                ranges[1] = [0]
            if len(ranges[2]) == 0:  # if the audio is too short, we just use the first chunk
                ranges[2] = [0]
            # randomly choose index for each part
            idx_front = np.random.choice(ranges[0])
            idx_middle = np.random.choice(ranges[1])
            idx_back = np.random.choice(ranges[2])
            # select mel
            mel_chunk_front = mel[idx_front:idx_front + chunk_frames, :]
            mel_chunk_middle = mel[idx_middle:idx_middle + chunk_frames, :]
            mel_chunk_back = mel[idx_back:idx_back + chunk_frames, :]
            # stack
            mel_fusion = torch.stack([mel_chunk_front, mel_chunk_middle, mel_chunk_back], dim=0)
        elif mel.shape[0] < self.target_length:  # padding if too short
            n_repeat = int(self.target_length / mel.shape[0]) + 1
            mel = mel.repeat(n_repeat, 1)[:self.target_length, :]
            mel_fusion = torch.stack([mel, mel, mel], dim=0)
        else:  # if equal
            mel_fusion = torch.stack([mel, mel, mel], dim=0)
        mel_fusion = mel_fusion.transpose(1, 2)  # [3, target_length, mel_bins] -> [3, mel_bins, target_length]

        # NOTE(review): the std is doubled here — this matches the original code;
        # confirm it is the intended normalization.
        mel_fusion = (mel_fusion - self.audio_mean) / (self.audio_std * 2)
        return mel_fusion

    def get_mel(self, audio_data):
        """Kaldi-style log-mel filterbank; returns a (T, num_mel_bins) tensor."""
        # Bug fix: the original `audio_data -= audio_data.mean()` mutated the
        # caller's waveform tensor in place; subtract into a fresh tensor instead.
        audio_data = audio_data - audio_data.mean()
        mel = torchaudio.compliance.kaldi.fbank(
            audio_data,
            htk_compat=True,
            sample_frequency=self.sample_rate,
            use_energy=False,
            window_type="hanning",
            num_mel_bins=self.num_mel_bins,
            dither=0.0,
            frame_length=25,
            frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS,
        )
        return mel  # (T, n_mels)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def get_audio_transform(args):
    """Factory returning the audio preprocessing callable."""
    return AudioTransform(args)
|
| 110 |
+
|
| 111 |
+
def load_and_transform_audio(
        audio_path,
        transform,
):
    """Load an audio file and apply the mel-spectrogram transform."""
    waveform_and_sr = torchaudio_loader(audio_path)
    return transform(waveform_and_sr)
|
RAG/Knowledge_Database/languagebind_main/data/process_depth.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import PIL
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
from PIL import Image
|
| 6 |
+
from torch import nn
|
| 7 |
+
from torchvision import transforms
|
| 8 |
+
from open_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def opencv_loader(path):
    """Read an image (e.g. a 16-bit depth map) unchanged and return float32 pixels.

    Bug fix: cv2.imread silently returns None on an unreadable path, which the
    original turned into an opaque AttributeError on `.astype`; raise a clear
    FileNotFoundError instead.
    """
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if img is None:
        raise FileNotFoundError(f"cv2.imread could not read image: {path}")
    return img.astype('float32')
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class DepthNorm(nn.Module):
    """Normalize a raw depth map into a 3-channel float tensor in [0, 1].

    Depth values are divided by ``scale`` (millimeters -> meters for NYUv2
    absolute depth), clipped to [min_depth, max_depth], and rescaled: by
    ``max_depth`` when it is non-zero, otherwise by the image's own maximum.
    """

    def __init__(
            self,
            max_depth=0,
            min_depth=0.01,
    ):
        super().__init__()
        self.max_depth = max_depth
        self.min_depth = min_depth
        self.scale = 1000.0  # nyuv2 abs.depth

    def forward(self, image):
        meters = image / self.scale  # (H, W) in meters
        meters = meters.clip(min=self.min_depth)
        if self.max_depth != 0:
            meters = meters.clip(max=self.max_depth)
            meters /= self.max_depth  # rescale to 0-1
        else:
            meters /= meters.max()
        # replicate the single channel three times so depth looks like RGB
        stacked = torch.from_numpy(meters).unsqueeze(0).repeat(3, 1, 1)
        return stacked.to(torch.get_default_dtype())
|
| 37 |
+
|
| 38 |
+
def get_depth_transform(args):
    """Compose the depth pipeline: normalize, resize, center-crop, standardize."""
    steps = [
        DepthNorm(max_depth=args.max_depth),
        transforms.Resize(224, interpolation=transforms.InterpolationMode.BICUBIC),
        transforms.CenterCrop(224),
        # depth is replicated to 3 channels, so image statistics are applied
        transforms.Normalize(OPENAI_DATASET_MEAN, OPENAI_DATASET_STD),
        # transforms.Normalize((0.5, ), (0.5, )) # 0-1 to norm distribution
        # transforms.Normalize((0.0418, ), (0.0295, )) # sun rgb-d imagebind
        # transforms.Normalize((0.02, ), (0.00295, )) # nyuv2
    ]
    return transforms.Compose(steps)
|
| 51 |
+
|
| 52 |
+
def load_and_transform_depth(depth_path, transform):
    """Load a depth image from disk and return its transformed pixel values."""
    raw = opencv_loader(depth_path)
    return {'pixel_values': transform(raw)}
|
RAG/Knowledge_Database/languagebind_main/data/process_image.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
|
| 3 |
+
from open_clip import image_transform, OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def image_loader(path):
    """Open an image file lazily with PIL."""
    return Image.open(path)
|
| 8 |
+
|
| 9 |
+
def get_image_transform(args):
    """Build the (eval-mode) CLIP image preprocessing pipeline."""
    return image_transform(
        args.image_size,
        is_train=False,
        mean=OPENAI_DATASET_MEAN,
        std=OPENAI_DATASET_STD,
    )
|
| 17 |
+
|
| 18 |
+
def load_and_transform_image(
        image_path,
        transform,
):
    """Load an image from disk and return its transformed pixel values."""
    pil_image = image_loader(image_path)
    return {'pixel_values': transform(pil_image)}
|
RAG/Knowledge_Database/languagebind_main/data/process_text.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import gzip
|
| 5 |
+
import html
|
| 6 |
+
import io
|
| 7 |
+
from functools import lru_cache
|
| 8 |
+
from typing import List, Tuple
|
| 9 |
+
|
| 10 |
+
import ftfy
|
| 11 |
+
import regex as re
|
| 12 |
+
from iopath.common.file_io import g_pathmgr
|
| 13 |
+
# Absolute path to the gzipped BPE merge table shipped next to this module.
BPE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
|
| 14 |
+
|
| 15 |
+
# Modified from github.com/openai/CLIP
|
| 16 |
+
@lru_cache()
def bytes_to_unicode():
    """
    Returns a dict mapping every utf-8 byte (0-255) to a printable unicode character.

    The reversible bpe codes work on unicode strings, so every byte needs a
    stand-in character the bpe code won't barf on (no whitespace/control chars).
    Printable latin bytes map to themselves; the remaining bytes are assigned
    codepoints starting at 256, in byte order.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    mapping = {b: chr(b) for b in printable}
    offset = 0
    for byte in range(2**8):
        if byte not in mapping:
            mapping[byte] = chr(2**8 + offset)
            offset += 1
    return mapping
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a tuple of variable-length string symbols.
    """
    word[0]  # intentional: preserve the original's IndexError on empty input
    return set(zip(word, word[1:]))
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def basic_clean(text):
    """Repair mojibake via ftfy and undo (possibly double) HTML escaping."""
    fixed = ftfy.fix_text(text)
    return html.unescape(html.unescape(fixed)).strip()
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def whitespace_clean(text):
    """Collapse runs of whitespace into single spaces and trim the ends."""
    collapsed = re.sub(r"\s+", " ", text)
    return collapsed.strip()
|
| 65 |
+
|
| 66 |
+
class SimpleTokenizer(object):
    """CLIP-style BPE tokenizer (modified from github.com/openai/CLIP).

    Encodes text to BPE token ids and pads/truncates to ``context_length``.
    """

    def __init__(self, bpe_path: str, context_length=77):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}

        # The merge table is a gzipped text file: one merge pair per line.
        with g_pathmgr.open(bpe_path, "rb") as fh:
            bpe_bytes = io.BytesIO(fh.read())
        merges: List[str] = gzip.open(bpe_bytes).read().decode("utf-8").split("\n")
        merges = merges[1 : 49152 - 256 - 2 + 1]
        merges: List[Tuple[str, ...]] = [tuple(merge.split()) for merge in merges]
        # vocab: byte symbols, end-of-word variants, merged pairs, special tokens
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + "</w>" for v in vocab]
        for merge in merges:
            vocab.append("".join(merge))
        vocab.extend(["<|startoftext|>", "<|endoftext|>"])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {
            "<|startoftext|>": "<|startoftext|>",
            "<|endoftext|>": "<|endoftext|>",
        }
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE,
        )
        self.context_length = context_length

    def bpe(self, token):
        """Apply BPE merges to one token, returning space-joined subwords."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"

        while True:
            # merge the highest-ranked (lowest index) bigram present in the word
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                # Bug fix: the original bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit; only ValueError (no further
                # occurrence of `first`) is expected here.
                except ValueError:
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Tokenize *text* into a list of BPE token ids (no special tokens)."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
            bpe_tokens.extend(
                self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
            )
        return bpe_tokens

    def decode(self, tokens):
        """Convert BPE token ids back into a human-readable string."""
        text = "".join([self.decoder[token] for token in tokens])
        text = (
            bytearray([self.byte_decoder[c] for c in text])
            .decode("utf-8", errors="replace")
            .replace("</w>", " ")
        )
        return text

    def __call__(self, texts, context_length=None):
        """Tokenize one string (or a list) into a padded LongTensor.

        Returns shape (context_length,) for a single input, otherwise
        (len(texts), context_length).
        """
        if not context_length:
            context_length = self.context_length

        if isinstance(texts, str):
            texts = [texts]

        sot_token = self.encoder["<|startoftext|>"]
        eot_token = self.encoder["<|endoftext|>"]
        all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts]
        result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)

        for i, tokens in enumerate(all_tokens):
            tokens = tokens[:context_length]  # truncate; remainder is zero padding
            result[i, : len(tokens)] = torch.tensor(tokens)

        if len(result) == 1:
            return result[0]
        return result
|
| 173 |
+
|
| 174 |
+
def clean_youtube(text, is_tags=False):
    """Strip YouTube/TikTok boilerplate words from a title or hashtag string.

    Lowercases, removes platform keywords, then drops any character outside
    the allowed set (punctuation is kept unless *is_tags* is true) and
    collapses whitespace.
    """
    text = text.lower() + ' '
    text = re.sub(
        r'#video|video|#shorts|shorts| shorts|#short| short|#youtubeshorts|youtubeshorts|#youtube| youtube|#shortsyoutube|#ytshorts|ytshorts|#ytshort|#shortvideo|shortvideo|#shortsfeed|#tiktok|tiktok|#tiktokchallenge|#myfirstshorts|#myfirstshort|#viral|viralvideo|viral|#viralshorts|#virlshort|#ytviralshorts',
        ' ', text)
    text = re.sub(r' s |short|youtube|virlshort|#', ' ', text)
    keep_pattern = r'[^a-zA-Z0-9\s]' if is_tags else r'[^a-zA-Z0-9\s\.,;:?!\'\"|]'
    text = re.sub(keep_pattern, '', text)
    return whitespace_clean(basic_clean(text))
|
| 186 |
+
|
| 187 |
+
def load_and_transform_text(text, tokenizer, title=True):
    """Clean a caption and tokenize it into input_ids + attention_mask.

    With ``title=True`` the text is treated as "<title>#tag1#tag2...": the
    title and hashtag tail are cleaned separately and rejoined with ", ".
    Raises ValueError when the cleaned text is empty.
    """
    if title:
        # split "<title>#tag1#tag2" into the title and its hashtag tail
        title_hashtags = text.split('#')
        title, hashtags = title_hashtags[0], '#' + '#'.join(title_hashtags[1:])
        title = clean_youtube(title)
        hashtags = clean_youtube(hashtags, is_tags=True)
        text = title + ', ' + hashtags
    if text == '' or text.isspace():
        raise ValueError('text is empty')
    # NOTE(review): assumes tokenizer(text) yields an (input_ids, attention_mask)
    # pair — confirm against the tokenizer actually passed in.
    input_ids, attention_mask = tokenizer(text)
    return {'input_ids': input_ids.squeeze(), 'attention_mask': attention_mask.squeeze()}
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
if __name__ == '__main__':
    # NOTE(review): this smoke test is broken as written — it passes a file
    # path as the text and omits the required `tokenizer` argument, so it
    # raises TypeError. Probably intended something like:
    # load_and_transform_text(sample_text, SimpleTokenizer(BPE_PATH)).
    load_and_transform_text("bpe/bpe_simple_vocab_16e6.txt.gz")
|
RAG/Knowledge_Database/languagebind_main/data/process_thermal.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import PIL
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
from PIL import Image
|
| 6 |
+
from torch import nn
|
| 7 |
+
from torchvision import transforms
|
| 8 |
+
from open_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def get_thermal_transform(args):
    """Compose the thermal-image pipeline: tensor, resize, crop, standardize."""
    steps = [
        transforms.ToTensor(),
        transforms.Resize(224, interpolation=transforms.InterpolationMode.BICUBIC),
        transforms.CenterCrop(224),
        # thermal frames are treated like RGB images for normalization
        transforms.Normalize(OPENAI_DATASET_MEAN, OPENAI_DATASET_STD)
    ]
    return transforms.Compose(steps)
|
| 22 |
+
|
| 23 |
+
def load_and_transform_thermal(thermal_path, transform):
    """Load a thermal image from disk and apply ``transform`` to it.

    Parameters
    ----------
    thermal_path : str or path-like
        Path to the thermal image file (callers elsewhere are instructed to
        pre-convert thermal data to RGB — TODO confirm the expected mode).
    transform : callable
        Preprocessing pipeline, e.g. the one built by ``get_thermal_transform``.

    Returns
    -------
    dict
        ``{'pixel_values': <transformed tensor>}``.
    """
    # Fix: use a context manager so the underlying file handle is released
    # promptly — bare Image.open() keeps lazily-loaded files open.
    with Image.open(thermal_path) as thermal:
        thermal_outputs = transform(thermal)
    return {'pixel_values': thermal_outputs}
|
RAG/Knowledge_Database/languagebind_main/data/process_video.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import io
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
|
| 6 |
+
import cv2
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import decord
|
| 10 |
+
import torchvision.transforms
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from decord import VideoReader, cpu
|
| 13 |
+
|
| 14 |
+
try:
|
| 15 |
+
from petrel_client.client import Client
|
| 16 |
+
petrel_backend_imported = True
|
| 17 |
+
except (ImportError, ModuleNotFoundError):
|
| 18 |
+
petrel_backend_imported = False
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
from pytorchvideo.data.encoded_video import EncodedVideo
|
| 22 |
+
from torchvision.transforms import Compose, Lambda, ToTensor
|
| 23 |
+
from torchvision.transforms._transforms_video import NormalizeVideo, RandomCropVideo, RandomHorizontalFlipVideo
|
| 24 |
+
from pytorchvideo.transforms import ApplyTransformToKey, ShortSideScale, UniformTemporalSubsample
|
| 25 |
+
import sys
|
| 26 |
+
sys.path.append('../')
|
| 27 |
+
from open_clip import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
|
| 28 |
+
from os.path import join as opj
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def get_video_loader(use_petrel_backend: bool = True,
                     enable_mc: bool = True,
                     conf_path: str = None):
    """Return a closure that opens a video path with decord's ``VideoReader``.

    When the petrel client is importable and requested, ``s3:``-style paths
    are fetched through it into an in-memory buffer before decoding; local
    paths are handed to decord directly.
    """
    client = None
    if use_petrel_backend and petrel_backend_imported:
        client = Client(conf_path=conf_path, enable_mc=enable_mc)

    def _loader(video_path):
        # Remote objects are downloaded into a BytesIO first.
        source = video_path
        if client is not None and 's3:' in video_path:
            source = io.BytesIO(client.get(video_path))
        return VideoReader(source, num_threads=1, ctx=cpu(0))

    return _loader
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# Make decord return torch tensors directly (instead of its native NDArray),
# so get_batch() results can be permuted/normalised without conversion.
decord.bridge.set_bridge('torch')
# video_loader = get_video_loader()
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def get_video_transform(args):
    """Build the per-backend video preprocessing pipeline.

    ``args.video_decode_backend`` selects the decoder. Every backend shares
    the same frame-level tail (CLIP normalisation, short-side scale to 224,
    random 224 crop, random horizontal flip); the /255 rescale is skipped for
    'imgs', whose frames already arrive in [0, 1] from ``ToTensor``, and the
    'pytorchvideo' backend additionally subsamples ``args.num_frames`` frames.

    Raises
    ------
    NameError
        When the backend is not one of (pytorchvideo, decord, opencv, imgs).
        Kept as NameError (not ValueError) for backward compatibility with
        existing callers.
    """
    backend = args.video_decode_backend

    def _frame_pipeline(rescale):
        # Shared tail used by every backend; `rescale` controls whether raw
        # uint8 frames are first scaled down to [0, 1].
        steps = []
        if rescale:
            steps.append(Lambda(lambda x: x / 255.0))
        steps += [
            NormalizeVideo(mean=OPENAI_DATASET_MEAN, std=OPENAI_DATASET_STD),
            ShortSideScale(size=224),
            RandomCropVideo(size=224),
            RandomHorizontalFlipVideo(p=0.5),
        ]
        return Compose(steps)

    if backend == 'pytorchvideo':
        # pytorchvideo clips are dicts; apply the pipeline to the "video" key.
        transform = ApplyTransformToKey(
            key="video",
            transform=Compose(
                [
                    UniformTemporalSubsample(args.num_frames),
                    _frame_pipeline(rescale=True),
                ]
            ),
        )
    elif backend in ('decord', 'opencv'):
        # Both decoders yield raw uint8 frames; frame sampling happens in
        # load_and_transform_video, not here.
        transform = _frame_pipeline(rescale=True)
    elif backend == 'imgs':
        transform = _frame_pipeline(rescale=False)
    else:
        raise NameError('video_decode_backend should specify in (pytorchvideo, decord, opencv, imgs)')
    return transform
|
| 108 |
+
|
| 109 |
+
def load_and_transform_video(
        video_path,
        transform,
        video_decode_backend='opencv',
        clip_start_sec=0.0,
        clip_end_sec=None,
        num_frames=8,
):
    """Decode frames from a video and apply ``transform`` to them.

    Parameters
    ----------
    video_path : str
        Path to the video file (or, for 'imgs', to the .mp4 whose sibling
        ``*_resize256_folder`` holds pre-extracted ``<i>.jpg`` frames).
    transform : callable
        Pipeline from ``get_video_transform`` matching the chosen backend.
    video_decode_backend : str
        One of 'pytorchvideo', 'decord', 'opencv', 'imgs'.
    clip_start_sec, clip_end_sec : float
        Clip boundaries; only used by the 'pytorchvideo' backend
        (``clip_end_sec=None`` means the full duration).
    num_frames : int
        Number of uniformly spaced frames sampled by the 'decord', 'opencv'
        and 'imgs' backends.

    Returns
    -------
    dict
        ``{'pixel_values': <transformed video tensor>}``.

    Raises
    ------
    NameError
        Unknown backend (kept as NameError for caller compatibility).
    RuntimeError
        When OpenCV fails to read a requested frame.
    """
    if video_decode_backend == 'pytorchvideo':
        # decord decoder through the pytorchvideo wrapper; audio is dropped.
        video = EncodedVideo.from_path(video_path, decoder="decord", decode_audio=False)
        duration = video.duration
        start_sec = clip_start_sec  # secs
        end_sec = clip_end_sec if clip_end_sec is not None else duration  # secs
        video_data = video.get_clip(start_sec=start_sec, end_sec=end_sec)
        video_outputs = transform(video_data)

    elif video_decode_backend == 'decord':
        decord_vr = VideoReader(video_path, ctx=cpu(0))
        duration = len(decord_vr)
        frame_id_list = np.linspace(0, duration - 1, num_frames, dtype=int)
        video_data = decord_vr.get_batch(frame_id_list)
        video_data = video_data.permute(3, 0, 1, 2)  # (T, H, W, C) -> (C, T, H, W)
        video_outputs = transform(video_data)

    elif video_decode_backend == 'opencv':
        cv2_vr = cv2.VideoCapture(video_path)
        try:
            duration = int(cv2_vr.get(cv2.CAP_PROP_FRAME_COUNT))
            frame_id_list = np.linspace(0, duration - 1, num_frames, dtype=int)

            video_data = []
            for frame_idx in frame_id_list:
                # Fix: use the named property instead of the magic number 1.
                cv2_vr.set(cv2.CAP_PROP_POS_FRAMES, int(frame_idx))
                ok, frame = cv2_vr.read()
                if not ok:
                    # Fix: the success flag was previously ignored, so a failed
                    # read crashed later in cvtColor with a confusing error.
                    raise RuntimeError(f'failed to read frame {frame_idx} from {video_path}')
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                video_data.append(torch.from_numpy(frame).permute(2, 0, 1))
        finally:
            # Release the capture even when a read fails mid-loop.
            cv2_vr.release()
        video_data = torch.stack(video_data, dim=1)
        video_outputs = transform(video_data)

    elif video_decode_backend == 'imgs':
        resize256_folder = video_path.replace('.mp4', '_resize256_folder')
        # Generalized: honour num_frames (was hard-coded to 8, which equals
        # the default, so default behavior is unchanged).
        video_data = [ToTensor()(Image.open(opj(resize256_folder, f'{i}.jpg')))
                      for i in range(num_frames)]
        video_data = torch.stack(video_data, dim=1)
        video_outputs = transform(video_data)

    else:
        raise NameError('video_decode_backend should specify in (pytorchvideo, decord, opencv, imgs)')
    return {'pixel_values': video_outputs}
|
| 159 |
+
|
| 160 |
+
# Manual smoke test.
# NOTE(review): this call omits the required `transform` argument and would
# raise TypeError if run; the hard-coded Windows path also suggests leftover
# local debug code — confirm before use.
if __name__ == '__main__':
    load_and_transform_video(r"D:\ONE-PEACE-main\lb_test\zHSOYcZblvY.mp4")
|
RAG/Knowledge_Database/languagebind_main/gradio_app.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
|
| 3 |
+
import gradio as gr
|
| 4 |
+
import argparse
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
from torch import nn
|
| 8 |
+
|
| 9 |
+
from languagebind import LanguageBind, transform_dict, LanguageBindImageTokenizer, to_device
|
| 10 |
+
|
| 11 |
+
# Pygments-style syntax-highlighting CSS scoped to the #chatbot element.
# One rule per Pygments token class (.k = keyword, .s = string, .c = comment,
# .n* = names, .g* = generic diff/output colors, etc.).
code_highlight_css = (
"""
#chatbot .hll { background-color: #ffffcc }
#chatbot .c { color: #408080; font-style: italic }
#chatbot .err { border: 1px solid #FF0000 }
#chatbot .k { color: #008000; font-weight: bold }
#chatbot .o { color: #666666 }
#chatbot .ch { color: #408080; font-style: italic }
#chatbot .cm { color: #408080; font-style: italic }
#chatbot .cp { color: #BC7A00 }
#chatbot .cpf { color: #408080; font-style: italic }
#chatbot .c1 { color: #408080; font-style: italic }
#chatbot .cs { color: #408080; font-style: italic }
#chatbot .gd { color: #A00000 }
#chatbot .ge { font-style: italic }
#chatbot .gr { color: #FF0000 }
#chatbot .gh { color: #000080; font-weight: bold }
#chatbot .gi { color: #00A000 }
#chatbot .go { color: #888888 }
#chatbot .gp { color: #000080; font-weight: bold }
#chatbot .gs { font-weight: bold }
#chatbot .gu { color: #800080; font-weight: bold }
#chatbot .gt { color: #0044DD }
#chatbot .kc { color: #008000; font-weight: bold }
#chatbot .kd { color: #008000; font-weight: bold }
#chatbot .kn { color: #008000; font-weight: bold }
#chatbot .kp { color: #008000 }
#chatbot .kr { color: #008000; font-weight: bold }
#chatbot .kt { color: #B00040 }
#chatbot .m { color: #666666 }
#chatbot .s { color: #BA2121 }
#chatbot .na { color: #7D9029 }
#chatbot .nb { color: #008000 }
#chatbot .nc { color: #0000FF; font-weight: bold }
#chatbot .no { color: #880000 }
#chatbot .nd { color: #AA22FF }
#chatbot .ni { color: #999999; font-weight: bold }
#chatbot .ne { color: #D2413A; font-weight: bold }
#chatbot .nf { color: #0000FF }
#chatbot .nl { color: #A0A000 }
#chatbot .nn { color: #0000FF; font-weight: bold }
#chatbot .nt { color: #008000; font-weight: bold }
#chatbot .nv { color: #19177C }
#chatbot .ow { color: #AA22FF; font-weight: bold }
#chatbot .w { color: #bbbbbb }
#chatbot .mb { color: #666666 }
#chatbot .mf { color: #666666 }
#chatbot .mh { color: #666666 }
#chatbot .mi { color: #666666 }
#chatbot .mo { color: #666666 }
#chatbot .sa { color: #BA2121 }
#chatbot .sb { color: #BA2121 }
#chatbot .sc { color: #BA2121 }
#chatbot .dl { color: #BA2121 }
#chatbot .sd { color: #BA2121; font-style: italic }
#chatbot .s2 { color: #BA2121 }
#chatbot .se { color: #BB6622; font-weight: bold }
#chatbot .sh { color: #BA2121 }
#chatbot .si { color: #BB6688; font-weight: bold }
#chatbot .sx { color: #008000 }
#chatbot .sr { color: #BB6688 }
#chatbot .s1 { color: #BA2121 }
#chatbot .ss { color: #19177C }
#chatbot .bp { color: #008000 }
#chatbot .fm { color: #0000FF }
#chatbot .vc { color: #19177C }
#chatbot .vg { color: #19177C }
#chatbot .vi { color: #19177C }
#chatbot .vm { color: #19177C }
#chatbot .il { color: #666666 }
""")
#.highlight { background: #f8f8f8; }

# Header markdown shown at the top of the demo: project logos, title, and
# GitHub/arXiv badge links.
title_markdown = ("""
<div style="display: flex; justify-content: center;">
    <a href="https://github.com/PKU-YuanGroup/LanguageBind">
    <img src="https://z1.ax1x.com/2023/10/16/piCuiDS.png" alt="LanguageBind🚀" border="0" style="height: 200px; margin-right: 20px;">
    </a>
    <a href="https://github.com/PKU-YuanGroup/LanguageBind">
    <img src="https://z1.ax1x.com/2023/11/04/piMLoQ0.png" style="height: 200px;">
    </a>
</div>
<h2 align="center"> LanguageBind: Extending Video-Language Pretraining to N-modality by Language-based Semantic Alignment </h2>

<h5 align="center"> If you like our project, please give us a star ✨ on Github for latest update. </h2>

<div align="center">
    <div style="display:flex; gap: 0.25rem;" align="center">
        <a href='https://github.com/PKU-YuanGroup/LanguageBind'><img src='https://img.shields.io/badge/Github-Code-blue'></a>
        <a href="https://arxiv.org/pdf/2310.01852.pdf"><img src="https://img.shields.io/badge/Arxiv-2310.01852-red"></a>
        <a href='https://github.com/PKU-YuanGroup/LanguageBind/stargazers'><img src='https://img.shields.io/github/stars/PKU-YuanGroup/LanguageBind.svg?style=social'></a>
    </div>
</div>
""")
# Final stylesheet handed to gr.Blocks: highlighting rules plus pre-wrap so
# long code lines wrap inside <pre> blocks across browsers.
css = code_highlight_css + """
pre {
    white-space: pre-wrap;       /* Since CSS 2.1 */
    white-space: -moz-pre-wrap;  /* Mozilla, since 1999 */
    white-space: -pre-wrap;      /* Opera 4-6 */
    white-space: -o-pre-wrap;    /* Opera 7 */
    word-wrap: break-word;       /* Internet Explorer 5.5+ */
}
"""
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def _modal_to_language_similarity(modality, modal_input, language):
    """Embed one non-text input plus a text query and return their dot product.

    ``modality`` keys into the module-level ``modality_transform`` dict and
    the LanguageBind model's output dict; ``modal_input`` is whatever that
    modality's transform expects (a file path for the gradio components used
    here). Relies on the module-level ``model``, ``device`` and
    ``modality_transform`` globals initialised under ``__main__``.
    """
    inputs = {
        modality: to_device(modality_transform[modality](modal_input), device),
        'language': to_device(modality_transform['language'](language, max_length=77, padding='max_length',
                                                             truncation=True, return_tensors='pt'), device),
    }
    with torch.no_grad():
        embeddings = model(inputs)
    return (embeddings[modality] @ embeddings['language'].T).item()


def image_to_language(image, language):
    """Similarity score between an image file and a text query."""
    return _modal_to_language_similarity('image', image, language)


def video_to_language(video, language):
    """Similarity score between a video file and a text query."""
    return _modal_to_language_similarity('video', video, language)


def audio_to_language(audio, language):
    """Similarity score between an audio file and a text query."""
    return _modal_to_language_similarity('audio', audio, language)


def depth_to_language(depth, language):
    """Similarity score between a depth map upload and a text query."""
    # gr.File hands over a tempfile wrapper; the transform needs its path.
    return _modal_to_language_similarity('depth', depth.name, language)


def thermal_to_language(thermal, language):
    """Similarity score between a thermal image file and a text query."""
    return _modal_to_language_similarity('thermal', thermal, language)
|
| 164 |
+
|
| 165 |
+
if __name__ == '__main__':
    # All heavy state (model, tokenizer, per-modality transforms) is created
    # here at module scope and read by the *_to_language callbacks above.
    device = 'cuda:0'  # NOTE(review): hard-coded to GPU 0 — confirm availability
    device = torch.device(device)
    # Checkpoint name per modality; *_FT variants are fine-tuned alternatives
    # to the base checkpoints named in the inline comments.
    clip_type = {
        'video': 'LanguageBind_Video_FT',  # also LanguageBind_Video
        'audio': 'LanguageBind_Audio_FT',  # also LanguageBind_Audio
        'thermal': 'LanguageBind_Thermal',
        'image': 'LanguageBind_Image',
        'depth': 'LanguageBind_Depth',
    }
    model = LanguageBind(clip_type=clip_type, use_temp=False)
    model = model.to(device)
    model.eval()
    # Text tokenization reuses the image checkpoint's tokenizer.
    pretrained_ckpt = f'lb203/LanguageBind_Image'
    tokenizer = LanguageBindImageTokenizer.from_pretrained(pretrained_ckpt, cache_dir='./cache_dir/tokenizer_cache_dir')
    # One preprocessing transform per modality, plus the tokenizer for text.
    modality_transform = {c: transform_dict[c](model.modality_config[c]) for c in clip_type}
    modality_transform['language'] = tokenizer

    # Gradio UI: one column per modality, each with its own text box,
    # similarity output and trigger button.
    with gr.Blocks(title="LanguageBind🚀", css=css) as demo:
        gr.Markdown(title_markdown)
        with gr.Row():
            with gr.Column():
                image = gr.Image(type="filepath", height=224, width=224, label='Image Input')
                language_i = gr.Textbox(lines=2, label='Text Input')
                out_i = gr.Textbox(label='Similarity of Image to Text')
                b_i = gr.Button("Calculate similarity of Image to Text")
            with gr.Column():
                video = gr.Video(type="filepath", height=224, width=224, label='Video Input')
                language_v = gr.Textbox(lines=2, label='Text Input')
                out_v = gr.Textbox(label='Similarity of Video to Text')
                b_v = gr.Button("Calculate similarity of Video to Text")
            with gr.Column():
                audio = gr.Audio(type="filepath", label='Audio Input')
                language_a = gr.Textbox(lines=2, label='Text Input')
                out_a = gr.Textbox(label='Similarity of Audio to Text')
                b_a = gr.Button("Calculate similarity of Audio to Text")
        with gr.Row():
            with gr.Column():
                # Depth uses gr.File (not gr.Image) because 16-bit PNGs are
                # required; the callback reads the uploaded file's .name.
                depth = gr.File(height=224, width=224, label='Depth Input, need a .png file, 16 bit, with values ranging from 0-10000 (representing 0-10 metres, but 1000 times)')
                language_d = gr.Textbox(lines=2, label='Text Input')
                out_d = gr.Textbox(label='Similarity of Depth to Text')
                b_d = gr.Button("Calculate similarity of Depth to Text")
            with gr.Column():
                thermal = gr.Image(type="filepath", height=224, width=224, label='Thermal Input, you should first convert to RGB')
                language_t = gr.Textbox(lines=2, label='Text Input')
                out_t = gr.Textbox(label='Similarity of Thermal to Text')
                b_t = gr.Button("Calculate similarity of Thermal to Text")

        # Wire each button to its modality's similarity callback.
        b_i.click(image_to_language, inputs=[image, language_i], outputs=out_i)
        b_a.click(audio_to_language, inputs=[audio, language_a], outputs=out_a)
        b_v.click(video_to_language, inputs=[video, language_v], outputs=out_v)
        b_d.click(depth_to_language, inputs=[depth, language_d], outputs=out_d)
        b_t.click(thermal_to_language, inputs=[thermal, language_t], outputs=out_t)

    demo.launch()
|
RAG/Knowledge_Database/languagebind_main/i_cls/datasets.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
from data.build_datasets import DataInfo
|
| 4 |
+
from open_clip import image_transform, OPENAI_DATASET_STD, OPENAI_DATASET_MEAN, get_tokenizer
|
| 5 |
+
from torchvision import datasets
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def get_imagenet(args, split):
    """Build a zero-shot ImageNet evaluation DataInfo for 'val' or 'v2'."""
    assert split in ["val", "v2"]

    # Evaluation-time preprocessing with the OpenAI CLIP image statistics.
    eval_transform = image_transform(args.image_size,
                                     is_train=False,
                                     mean=OPENAI_DATASET_MEAN,
                                     std=OPENAI_DATASET_STD)

    if split == "v2":
        # Imported lazily so the extra dependency is only needed for
        # ImageNet-V2 evaluation.
        from imagenetv2_pytorch import ImageNetV2Dataset
        dataset = ImageNetV2Dataset(location=args.imagenet_v2, transform=eval_transform)
    else:
        imagenet_root = args.imagenet_val
        assert imagenet_root
        dataset = datasets.ImageFolder(imagenet_root, transform=eval_transform)

    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=args.batch_size,
                                         num_workers=args.workers,
                                         sampler=None)

    return DataInfo(dataloader=loader, sampler=None)
|