#!/bin/bash
#
# One-click two-stage inference pipeline:
#   Video → [SMKD frozen] → Features → [SLTUNET] → Gloss
# Run without arguments for usage information.

# Abort on the first failing command.
set -e

# Resolve the directory containing this script so relative assets resolve
# no matter where the caller's working directory is.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# All default outputs land under this directory.
INFERENCE_ROOT="${SCRIPT_DIR}/inference_output"
mkdir -p "$INFERENCE_ROOT"
| |
|
| | |
# Early dispatch: if "--benchmark-efficiency" appears anywhere on the command
# line, point the user at the dedicated benchmark script and stop.
for cli_arg in "$@"; do
    if [[ "$cli_arg" == "--benchmark-efficiency" ]]; then
        echo ""
        echo "====================================================================="
        echo " Efficiency Benchmarking"
        echo "====================================================================="
        echo ""
        echo "Please run the simple benchmark script instead:"
        echo " bash eval/simple_benchmark.sh"
        echo ""
        exit 0
    fi
done
| |
|
| | |
# ANSI color escape sequences, stored literally and interpreted later by
# `echo -e`.  NC resets the terminal back to the default color.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'
| |
|
| | |
# --- Model and data artefact locations ------------------------------------
# SMKD visual feature extractor: config, frozen weights, gloss dictionary.
# NOTE(review): the absolute /research/... paths are machine-specific —
# confirm they exist on the target host before running.
SMKD_CONFIG="${SCRIPT_DIR}/smkd/asllrp_baseline.yaml"
SMKD_MODEL="/research/cbim/vast/sf895/code/Sign-X/output/huggingface_asllrp_repo/SignX/smkd/work_dir第七次训练全pose协助2000/asllrp_smkd/best_model.pt"
GLOSS_DICT="/research/cbim/vast/sf895/code/Sign-X/output/huggingface_asllrp_repo/SignX/smkd/asllrp第七次训练全pose协助2000/gloss_dict.npy"
# SLTUNET translation model checkpoint plus its vocabulary and BPE codes.
SLTUNET_CHECKPOINT="/research/cbim/vast/sf895/code/Sign-X/output/huggingface_asllrp_repo/SignX/checkpoints_asllrp第七次训练全pose协助2000/best"
VOCAB_FILE="${SCRIPT_DIR}/preprocessed-asllrp/vocab.asllrp"
BPE_CODES="${SCRIPT_DIR}/preprocessed-asllrp/asllrp.bpe"
| |
|
# Print the pipeline banner (quoted heredoc: no expansion needed here).
cat <<'BANNER'

======================================================================
 Sign Language Recognition - Full Inference Pipeline
======================================================================

 Pipeline: Video → [SMKD frozen] → Features → [SLTUNET] → Gloss
 Mode: inference (one-click two-stage execution)

======================================================================

BANNER
| |
|
| | |
# At least one argument (the video path or a directory) is mandatory.
if (( $# < 1 )); then
    echo -e "${RED}Error: missing video path${NC}"
    echo ""
    echo "Usage:"
    echo " $0 <video_path> [output_path]"
    echo " $0 --benchmark-efficiency [options...]"
    echo ""
    echo "Examples:"
    echo " $0 test.mp4"
    echo " $0 test.mp4 output.txt"
    echo " $0 --benchmark-efficiency --video test.mp4"
    echo ""
    exit 1
fi

VIDEO_PATH="$1"
| |
|
| | |
# ---------------------------------------------------------------------------
# Batch mode: when the first argument is a directory, re-invoke this script
# once per video file found directly inside it (non-recursive), then exit
# with 0 only if every child run succeeded.
# ---------------------------------------------------------------------------
if [ -d "$VIDEO_PATH" ]; then
    VIDEO_DIR=$(realpath "$VIDEO_PATH")
    # A single explicit output path is ambiguous across many videos.
    if [ -n "$2" ]; then
        echo -e "${RED}Error: output path override is not supported in batch mode${NC}"
        exit 1
    fi

    echo ""
    echo "======================================================================"
    echo " Batch Inference Mode"
    echo "======================================================================"
    echo " Directory: $VIDEO_DIR"
    echo " Outputs: stored per-video using default locations"
    echo "======================================================================"
    echo ""

    # NUL-delimited so file names with spaces/newlines survive intact.
    # `-t` strips the NUL delimiter from each element (the original omitted
    # it and relied on bash silently dropping NUL bytes from strings).
    mapfile -d '' -t VIDEO_FILES < <(find "$VIDEO_DIR" -maxdepth 1 -type f \( -iname '*.mp4' -o -iname '*.mov' -o -iname '*.avi' -o -iname '*.mkv' \) -print0 | sort -z)
    if [ ${#VIDEO_FILES[@]} -eq 0 ]; then
        echo -e "${RED}Error: no video files (.mp4/.mov/.avi/.mkv) found under $VIDEO_DIR${NC}"
        exit 1
    fi

    batch_status=0
    total=${#VIDEO_FILES[@]}
    index=1
    for video_file in "${VIDEO_FILES[@]}"; do
        echo ""
        echo ">>> [Batch] Processing ($index/$total): $video_file"
        # Child invocation: same script, single-video mode, default output.
        if bash "$SCRIPT_DIR/$(basename "${BASH_SOURCE[0]}")" "$video_file"; then
            echo ">>> [Batch] Completed: $video_file"
        else
            echo ">>> [Batch] Failed: $video_file"
            batch_status=1
        fi
        index=$((index + 1))
    done

    echo ""
    if [ $batch_status -eq 0 ]; then
        echo -e "${GREEN}✓ Batch inference finished without errors${NC}"
    else
        echo -e "${YELLOW}⚠ Batch inference finished with some failures (see logs above)${NC}"
    fi
    exit "$batch_status"
fi
| |
|
# Default output file: timestamped name under the inference root, unless the
# caller supplied an explicit second argument.
if [ -n "$2" ]; then
    OUTPUT_PATH="$2"
else
    OUTPUT_PATH="$INFERENCE_ROOT/inference_output_$(date +%Y%m%d_%H%M%S)_$RANDOM.txt"
fi

# The input must be an existing regular file at this point (directories were
# handled by batch mode above).
if [ ! -f "$VIDEO_PATH" ]; then
    echo -e "${RED}Error: video file not found: $VIDEO_PATH${NC}"
    exit 1
fi

VIDEO_PATH=$(realpath "$VIDEO_PATH")

# Normalise OUTPUT_PATH: absolute paths pass through unchanged, an existing
# relative file is resolved in place, anything else lands under the
# inference root.
if [[ "$OUTPUT_PATH" != /* ]]; then
    if [ -f "$OUTPUT_PATH" ]; then
        OUTPUT_PATH=$(realpath "$OUTPUT_PATH")
    else
        OUTPUT_PATH="${INFERENCE_ROOT}/${OUTPUT_PATH}"
    fi
fi
# Companion file holding the prediction with BPE joins removed.
OUTPUT_CLEAN_PATH="${OUTPUT_PATH}.clean"
| |
|
echo -e "${BLUE}[Configuration]${NC}"
echo " Input video: $VIDEO_PATH"
echo " Output file: $OUTPUT_PATH"
echo " SMKD model: $SMKD_MODEL"
echo " SLTUNET: $SLTUNET_CHECKPOINT"
echo ""

# Locate the conda installation so its activation helpers can be sourced.
CONDA_BASE=$(conda info --base 2>/dev/null || echo "")

if [ -z "$CONDA_BASE" ]; then
    echo -e "${RED}Error: could not find conda${NC}"
    echo "Please make sure conda is installed."
    exit 1
fi

# Make `conda activate` available in this non-interactive shell.  Guard the
# source explicitly: a missing profile script would otherwise abort the run
# (set -e) with an unhelpful low-level error.
if [ ! -f "${CONDA_BASE}/etc/profile.d/conda.sh" ]; then
    echo -e "${RED}Error: could not find conda${NC}"
    echo "Please make sure conda is installed."
    exit 1
fi
source "${CONDA_BASE}/etc/profile.d/conda.sh"
| |
|
| | |
# Scratch space for intermediate artefacts (video list, features, logs).
TEMP_DIR=$(mktemp -d)
# Remove the scratch directory on every exit path — the original only
# cleaned up on the success and final-failure branches, leaking it whenever
# set -e aborted part-way through.
trap 'rm -rf "$TEMP_DIR"' EXIT

echo -e "${BLUE}[1/2] Extracting video features with SMKD...${NC}"
echo " Environment: signx-slt (PyTorch)"
echo ""

# `if ! …` keeps the failure branch reachable: the original checked $? after
# the call, but under `set -e` the script would already have exited.
if ! conda activate signx-slt; then
    echo -e "${RED}Error: failed to activate signx-slt environment${NC}"
    exit 1
fi

# Single-entry list consumed by the SMKD embedder.
VIDEO_LIST_FILE="$TEMP_DIR/video_list.txt"
echo "$VIDEO_PATH" > "$VIDEO_LIST_FILE"

echo " ✓ Temporary video list created: $VIDEO_LIST_FILE"

# run.py and the smkd package are resolved relative to the script directory.
cd "$SCRIPT_DIR"

FEATURE_OUTPUT="$TEMP_DIR/features.h5"
| |
|
# Stage 1: run the SMKD embedder inside the PyTorch env and dump features to
# HDF5.  Shell variables are interpolated directly into the Python source —
# NOTE(review): a path containing a single quote would break this; acceptable
# while all paths are script-controlled, but worth confirming.
# `if ! python …` makes the error branch reachable; the original tested $?
# afterwards, which `set -e` never allowed to run on failure.
if ! python -c "
import sys
import os
sys.path.insert(0, 'smkd')

from smkd.sign_embedder import SignEmbedding
import h5py
import numpy as np

print(' Loading SMKD model...')
embedder = SignEmbedding(
    cfg='$SMKD_CONFIG',
    gloss_path='$GLOSS_DICT',
    sign_video_path='$VIDEO_LIST_FILE',
    model_path='$SMKD_MODEL',
    gpu_id='0',
    batch_size=1
)

print(' Extracting features...')
features = embedder.embed()

print(' Saving features to h5 file...')
with h5py.File('$FEATURE_OUTPUT', 'w') as hf:
    for key, feature in features.items():
        hf.create_dataset(key, data=feature)

print(' ✓ Feature extraction complete:', '$FEATURE_OUTPUT')
print(' Number of feature sequences:', len(features))

# Create source/target placeholder files for SLTUNET dataset
# Format: <image_index> <text>
# Use placeholder tokens because the gloss is what we want to predict
with open('$TEMP_DIR/src.txt', 'w') as f:
    for key in sorted(features.keys(), key=lambda x: int(x)):
        f.write(key + ' <unk>\\n')  # placeholder text

with open('$TEMP_DIR/tgt.txt', 'w') as f:
    for key in sorted(features.keys(), key=lambda x: int(x)):
        f.write('<unk>\\n')

print(' ✓ Source/target placeholder files ready')
"; then
    echo -e "${RED}Error: SMKD feature extraction failed${NC}"
    exit 1
fi
| |
|
echo ""
echo -e "${GREEN}✓ Stage 1 complete: features extracted${NC}"
echo ""

# Stage 2 runs in the TensorFlow 1.x environment.
echo -e "${BLUE}[2/2] Generating gloss sequence with SLTUNET...${NC}"
echo " Environment: slt_tf1 (TensorFlow)"
echo ""

# Reachable failure branch under set -e (see the stage-1 activation note);
# the original `if [ $? -ne 0 ]` check was dead code.
if ! conda activate slt_tf1; then
    echo -e "${RED}Error: failed to activate slt_tf1 environment${NC}"
    exit 1
fi

# Force the pure-Python protobuf backend.  NOTE(review): presumably works
# around a protobuf C-extension incompatibility in the TF1 env — confirm.
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python
| |
|
| | |
# Destination directory of the final prediction, and the raw prediction file
# SLTUNET writes into the scratch space.
# NOTE(review): OUTPUT_DIR is not referenced later in this script — verify
# whether run.py or a sourced helper consumes it before removing.
OUTPUT_DIR=$(dirname "$OUTPUT_PATH")
PREDICTION_TXT="$TEMP_DIR/prediction.txt"

# Python-literal config consumed by run.py.  The unquoted EOF delimiter is
# deliberate: shell variables are expanded into the generated file.
cat > "$TEMP_DIR/infer_config.py" <<EOF
{
    'sign_cfg': '$SMKD_CONFIG',
    'gloss_path': '$GLOSS_DICT',
    'smkd_model_path': '$SMKD_MODEL',
    'img_test_file': '$FEATURE_OUTPUT',
    'src_test_file': '$TEMP_DIR/src.txt',
    'tgt_test_file': '$TEMP_DIR/tgt.txt',
    'src_vocab_file': '$VOCAB_FILE',
    'tgt_vocab_file': '$VOCAB_FILE',
    'src_codes': '$BPE_CODES',
    'tgt_codes': '$BPE_CODES',
    'output_dir': '$SLTUNET_CHECKPOINT',
    'test_output': '$PREDICTION_TXT',
    'eval_batch_size': 1,
    'gpus': [0],
    'remove_bpe': True,
    'collect_attention_weights': True,
    'inference_video_path': '$VIDEO_PATH',
}
EOF
| |
|
echo " Loading SLTUNET model..."
echo " Running translation..."
echo ""

cd "$SCRIPT_DIR"

# Run the SLTUNET test pass.  The full log is preserved via tee while only
# the interesting lines are echoed to the console.  The trailing `|| true`
# is intentional: a non-matching grep (or a run.py failure) must not abort
# the script here — success is judged below by whether prediction.txt exists.
python run.py \
    --mode test \
    --config "$TEMP_DIR/infer_config.py" \
    2>&1 | tee "$TEMP_DIR/full_output.log" | grep -E "(Loading|Evaluating|BLEU|Scores|Saving detailed|Error)" || true
| |
|
# ---------------------------------------------------------------------------
# Post-processing: success is defined by run.py having written prediction.txt.
# On success: publish the prediction, archive any attention-analysis output,
# run the optional visualization helpers, and consolidate everything into the
# primary sample directory.
# ---------------------------------------------------------------------------
if [ -f "$TEMP_DIR/prediction.txt" ]; then
    echo ""
    echo -e "${GREEN}✓ Inference complete: gloss sequence generated${NC}"
    echo ""

    # Raw (BPE-segmented) prediction plus a cleaned copy with joins removed.
    cp "$TEMP_DIR/prediction.txt" "$OUTPUT_PATH"
    sed 's/@@ //g' "$OUTPUT_PATH" > "$OUTPUT_CLEAN_PATH"

    # Attention-analysis directories produced when collect_attention_weights
    # is enabled.  NUL-delimited mapfile keeps paths with whitespace intact;
    # the original iterated an unquoted word-split string.
    mapfile -d '' -t DETAILED_DIR_LIST < <(find "$TEMP_DIR" -maxdepth 1 -type d -name "detailed_*" -print0 2>/dev/null)
    ATTENTION_ANALYSIS_DIR=""

    if [ ${#DETAILED_DIR_LIST[@]} -gt 0 ]; then
        echo -e "${BLUE}Detected detailed attention analysis, saving...${NC}"
        for detailed_dir in "${DETAILED_DIR_LIST[@]}"; do
            dir_name=$(basename "$detailed_dir")
            dest_path="$INFERENCE_ROOT/$dir_name"
            mv "$detailed_dir" "$dest_path"
            ATTENTION_ANALYSIS_DIR="$dest_path"

            # One subdirectory per analysed sample.
            mapfile -t SAMPLE_DIRS < <(find "$dest_path" -mindepth 1 -maxdepth 1 -type d -print | sort)
            sample_count=${#SAMPLE_DIRS[@]}
            echo " ✓ Saved $sample_count sample analyses to: $dest_path"

            # ---- feature-to-frame mapping (needs the PyTorch env) ----------
            echo ""
            echo -e "${BLUE}Generating feature-to-frame mapping...${NC}"
            if [ -f "$SCRIPT_DIR/eval/generate_feature_mapping.py" ]; then
                conda activate signx-slt
                if [ ${#SAMPLE_DIRS[@]} -eq 0 ]; then
                    echo " ⚠ No sample directories found, skipping mapping"
                else
                    for sample_dir in "${SAMPLE_DIRS[@]}"; do
                        if [ -d "$sample_dir" ]; then
                            # `|| true`: grep returns 1 when nothing matches,
                            # which would kill the script under set -e
                            # (latent bug in the original).
                            python "$SCRIPT_DIR/eval/generate_feature_mapping.py" "$sample_dir" "$VIDEO_PATH" 2>&1 | grep -E "(feature|frame|mapping|error)" || true
                        fi
                    done
                fi
            else
                echo " ⓘ generate_feature_mapping.py not found, skipping mapping"
            fi

            # ---- visualizations --------------------------------------------
            echo ""
            echo -e "${BLUE}Regenerating visualizations (latest code)...${NC}"
            if [ -f "$SCRIPT_DIR/eval/regenerate_visualizations.py" ]; then
                if [ ${#SAMPLE_DIRS[@]} -eq 0 ]; then
                    echo " ⚠ No sample directories found, skipping visualization"
                else
                    python "$SCRIPT_DIR/eval/regenerate_visualizations.py" "$dest_path" "$VIDEO_PATH"
                fi
            else
                echo " ⓘ regenerate_visualizations.py not found, falling back to legacy scripts"
                if [ -f "$SCRIPT_DIR/eval/generate_gloss_frames.py" ]; then
                    python "$SCRIPT_DIR/eval/generate_gloss_frames.py" "$dest_path" "$VIDEO_PATH"
                fi
            fi

            # ---- interactive HTML ------------------------------------------
            echo ""
            echo -e "${BLUE}Creating interactive HTML visualization...${NC}"
            if [ -f "$SCRIPT_DIR/eval/generate_interactive_alignment.py" ]; then
                if [ ${#SAMPLE_DIRS[@]} -eq 0 ]; then
                    echo " ⚠ No sample directories found, skipping HTML generation"
                else
                    for sample_dir in "${SAMPLE_DIRS[@]}"; do
                        if [ -d "$sample_dir" ]; then
                            python "$SCRIPT_DIR/eval/generate_interactive_alignment.py" "$sample_dir"
                        fi
                    done
                fi
            else
                echo " ⓘ generate_interactive_alignment.py not found, skipping HTML generation"
            fi

            # ---- attention keyframes ---------------------------------------
            echo ""
            echo -e "${BLUE}Extracting attention keyframes...${NC}"
            if [ -f "$SCRIPT_DIR/eval/extract_attention_keyframes.py" ]; then
                if [ ${#SAMPLE_DIRS[@]} -eq 0 ]; then
                    echo " ⚠ No sample directories found, skipping keyframes"
                else
                    for sample_dir in "${SAMPLE_DIRS[@]}"; do
                        if [ -d "$sample_dir" ]; then
                            echo " Processing sample: $(basename "$sample_dir")"
                            python "$SCRIPT_DIR/eval/extract_attention_keyframes.py" "$sample_dir" "$VIDEO_PATH"
                        fi
                    done
                fi
            else
                echo " ⓘ extract_attention_keyframes.py not found, skipping keyframes"
            fi

            # Restore the TF env for any remaining SLTUNET-side work.
            conda activate slt_tf1
        done
    fi

    # Consolidate outputs into the first (alphabetically) sample directory so
    # translation, analysis assets and the source video live side by side.
    if [ -n "$ATTENTION_ANALYSIS_DIR" ] && [ -d "$ATTENTION_ANALYSIS_DIR" ]; then
        PRIMARY_SAMPLE_DIR=$(find "$ATTENTION_ANALYSIS_DIR" -mindepth 1 -maxdepth 1 -type d | sort | head -n 1)
        if [ -n "$PRIMARY_SAMPLE_DIR" ] && [ -d "$PRIMARY_SAMPLE_DIR" ]; then
            TRANSLATION_FILE="${PRIMARY_SAMPLE_DIR}/translation.txt"

            MOVED_BPE_FILE=""
            MOVED_CLEAN_FILE=""
            if [ -f "$OUTPUT_PATH" ]; then
                NEW_OUTPUT_PATH="${PRIMARY_SAMPLE_DIR}/$(basename "$OUTPUT_PATH")"
                mv "$OUTPUT_PATH" "$NEW_OUTPUT_PATH"
                MOVED_BPE_FILE="$NEW_OUTPUT_PATH"
            fi

            if [ -f "$OUTPUT_CLEAN_PATH" ]; then
                CLEAN_BASENAME=$(basename "$OUTPUT_CLEAN_PATH")
                NEW_CLEAN_PATH="${PRIMARY_SAMPLE_DIR}/${CLEAN_BASENAME}"
                mv "$OUTPUT_CLEAN_PATH" "$NEW_CLEAN_PATH"
                MOVED_CLEAN_FILE="$NEW_CLEAN_PATH"
            fi

            # Synthesise translation.txt when run.py did not write one.
            if [ ! -f "$TRANSLATION_FILE" ]; then
                TRANS_BPE=$(head -n 1 "$TEMP_DIR/prediction.txt")
                TRANS_CLEAN=$(sed 's/@@ //g' "$TEMP_DIR/prediction.txt" | head -n 1)
                {
                    echo "With BPE: ${TRANS_BPE}"
                    echo "Clean: ${TRANS_CLEAN}"
                    echo "Ground Truth: [NOT AVAILABLE]"
                } > "$TRANSLATION_FILE"
            fi

            # The moved raw/clean copies are redundant once translation.txt exists.
            if [ -n "$MOVED_BPE_FILE" ] && [ -f "$MOVED_BPE_FILE" ] && [ "$MOVED_BPE_FILE" != "$TRANSLATION_FILE" ]; then
                rm -f "$MOVED_BPE_FILE"
            fi
            if [ -n "$MOVED_CLEAN_FILE" ] && [ -f "$MOVED_CLEAN_FILE" ] && [ "$MOVED_CLEAN_FILE" != "$TRANSLATION_FILE" ]; then
                rm -f "$MOVED_CLEAN_FILE"
            fi

            # Keep a copy of the source video next to the analysis assets.
            if [ -f "$VIDEO_PATH" ]; then
                VIDEO_BASENAME=$(basename "$VIDEO_PATH")
                DEST_VIDEO_PATH="${PRIMARY_SAMPLE_DIR}/${VIDEO_BASENAME}"
                if [ ! -f "$DEST_VIDEO_PATH" ]; then
                    cp "$VIDEO_PATH" "$DEST_VIDEO_PATH"
                fi
            fi

            # From here on, both output paths refer to the consolidated file.
            OUTPUT_PATH="$TRANSLATION_FILE"
            OUTPUT_CLEAN_PATH="$TRANSLATION_FILE"
        fi
    fi

    echo ""
    echo "======================================================================"
    echo " Inference succeeded!"
    echo "======================================================================"
    echo ""
    echo "Output files:"
    echo " Raw (with BPE): $OUTPUT_PATH"
    echo " Cleaned result: $OUTPUT_CLEAN_PATH"

    if [ -n "$ATTENTION_ANALYSIS_DIR" ]; then
        echo " Detailed analysis dir: $ATTENTION_ANALYSIS_DIR"
        echo ""
        echo "Attention assets include:"
        echo " - attention_heatmap.png"
        echo " - word_frame_alignment.png"
        echo " - gloss_to_frames.png"
        echo " - analysis_report.txt"
        echo " - attention_weights.npy"
        echo " - attention_keyframes/ (per-gloss keyframe previews)"
        echo " * peak feature frames per gloss"
        echo " * heatmaps overlayed on the video frames"
    fi

    echo ""
    echo "Recognition result (BPE removed):"
    echo "----------------------------------------------------------------------"
    head -5 "$OUTPUT_CLEAN_PATH" | sed 's/^/ /'
    echo "----------------------------------------------------------------------"
    echo ""
    echo -e "${GREEN}✓ Full pipeline completed (SMKD → SLTUNET)${NC}"
    echo ""

    echo -e "${BLUE}Cleaning temporary files...${NC}"
    rm -rf "$TEMP_DIR"
    echo " ✓ Temporary files removed"
    echo ""
else
    echo -e "${RED}Error: inference failed, no output generated${NC}"
    rm -rf "$TEMP_DIR"
    exit 1
fi
| |
|