yuhangzang committed
Commit 5f476ea · verified · 1 Parent(s): 74d0d48

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +34 -0
  2. workspace/.DS_Store +0 -0
  3. workspace/01_Productivity_Flow/.DS_Store +0 -0
  4. workspace/01_Productivity_Flow/task_10_pdf_digest/.DS_Store +0 -0
  5. workspace/01_Productivity_Flow/task_10_pdf_digest/exec/papers.tar +3 -0
  6. workspace/01_Productivity_Flow/task_10_pdf_digest/gt/ground_truth.json +297 -0
  7. workspace/01_Productivity_Flow/task_2_table_tex_download/.DS_Store +0 -0
  8. workspace/01_Productivity_Flow/task_2_table_tex_download/exec/.gitkeep +0 -0
  9. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/1.tex +39 -0
  10. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/10.tex +19 -0
  11. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/11.tex +24 -0
  12. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/12.tex +28 -0
  13. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/13.tex +109 -0
  14. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/14.tex +28 -0
  15. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/15.tex +27 -0
  16. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/16.tex +27 -0
  17. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/17.tex +27 -0
  18. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/18.tex +32 -0
  19. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/2.tex +39 -0
  20. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/3.tex +43 -0
  21. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/4.tex +35 -0
  22. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/5.tex +37 -0
  23. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/6.tex +30 -0
  24. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/7.tex +55 -0
  25. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/8.tex +21 -0
  26. workspace/01_Productivity_Flow/task_2_table_tex_download/gt/9.tex +17 -0
  27. workspace/01_Productivity_Flow/task_3_bibtex/.DS_Store +0 -0
  28. workspace/01_Productivity_Flow/task_3_bibtex/exec/2489e1b1a4830c47c93322340d8a9f61.pdf +3 -0
  29. workspace/01_Productivity_Flow/task_3_bibtex/exec/2959f681e57b94946d8d83e63108743b.pdf +3 -0
  30. workspace/01_Productivity_Flow/task_3_bibtex/exec/4de47fd19b562f6d80b38ca25c100e34.pdf +3 -0
  31. workspace/01_Productivity_Flow/task_3_bibtex/exec/4fb4a8c10c244047b34b98f0802ef736.pdf +3 -0
  32. workspace/01_Productivity_Flow/task_3_bibtex/exec/695b9b59dee6083f338f50f697bbc0a8.pdf +3 -0
  33. workspace/01_Productivity_Flow/task_3_bibtex/exec/6d7048c05f54c7810f325586cb691275.pdf +3 -0
  34. workspace/01_Productivity_Flow/task_3_bibtex/exec/757d0bb0887db877663297fbb1ac0f93.pdf +3 -0
  35. workspace/01_Productivity_Flow/task_3_bibtex/exec/89c977567a6162eb19c09946d25d4e7f.pdf +3 -0
  36. workspace/01_Productivity_Flow/task_3_bibtex/exec/a1545d8c44f7879527b4bfdc2d550962.pdf +3 -0
  37. workspace/01_Productivity_Flow/task_3_bibtex/exec/a9c8098ce76332faaa6e24a10098bd88.pdf +3 -0
  38. workspace/01_Productivity_Flow/task_3_bibtex/exec/b059bfe6b011cb483c719fd293f13f7b.pdf +3 -0
  39. workspace/01_Productivity_Flow/task_3_bibtex/exec/b8b730d1313f51fef24a5a81e46e292f.pdf +3 -0
  40. workspace/01_Productivity_Flow/task_3_bibtex/exec/ce85fc04493b4ec6ab8e4d174ddbe8e1.pdf +3 -0
  41. workspace/01_Productivity_Flow/task_3_bibtex/exec/d197b59e06827356f606ff41a479f4ee.pdf +3 -0
  42. workspace/01_Productivity_Flow/task_3_bibtex/exec/d2bcd6b1d8428116ffe3df31e783e72a.pdf +3 -0
  43. workspace/01_Productivity_Flow/task_3_bibtex/exec/e4b6db4b71fd970057e5b48a2e4e26ea.pdf +3 -0
  44. workspace/01_Productivity_Flow/task_3_bibtex/exec/e522f06ccdb0216fef37f1b591d9dc1f.pdf +3 -0
  45. workspace/01_Productivity_Flow/task_3_bibtex/exec/e5f870f54750f54b87de5634c9d5e075.pdf +3 -0
  46. workspace/01_Productivity_Flow/task_3_bibtex/exec/e9b81fe821dc398c0644e1dda2fab714.pdf +3 -0
  47. workspace/01_Productivity_Flow/task_3_bibtex/exec/f62a2ebbbe16dc365c992371aff60ea2.pdf +3 -0
  48. workspace/01_Productivity_Flow/task_3_bibtex/exec/fe790c3e46d18e0c3d6ed08a26d1e322.pdf +3 -0
  49. workspace/01_Productivity_Flow/task_3_bibtex/gt/2203.02155.bib +9 -0
  50. workspace/01_Productivity_Flow/task_3_bibtex/gt/2301.12597.bib +9 -0
.gitattributes CHANGED
@@ -58,3 +58,37 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/2489e1b1a4830c47c93322340d8a9f61.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/2959f681e57b94946d8d83e63108743b.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/4de47fd19b562f6d80b38ca25c100e34.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/4fb4a8c10c244047b34b98f0802ef736.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/695b9b59dee6083f338f50f697bbc0a8.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/6d7048c05f54c7810f325586cb691275.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/757d0bb0887db877663297fbb1ac0f93.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/89c977567a6162eb19c09946d25d4e7f.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/a1545d8c44f7879527b4bfdc2d550962.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/a9c8098ce76332faaa6e24a10098bd88.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/b059bfe6b011cb483c719fd293f13f7b.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/b8b730d1313f51fef24a5a81e46e292f.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/ce85fc04493b4ec6ab8e4d174ddbe8e1.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/d197b59e06827356f606ff41a479f4ee.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/d2bcd6b1d8428116ffe3df31e783e72a.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/e4b6db4b71fd970057e5b48a2e4e26ea.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/e522f06ccdb0216fef37f1b591d9dc1f.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/e5f870f54750f54b87de5634c9d5e075.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/e9b81fe821dc398c0644e1dda2fab714.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/f62a2ebbbe16dc365c992371aff60ea2.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/01_Productivity_Flow/task_3_bibtex/exec/fe790c3e46d18e0c3d6ed08a26d1e322.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/02_Code_Intelligence/task_11_resume_homepage_zh/exec/ref_resume.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/02_Code_Intelligence/task_6_benchmark_vlmeval_ocrbench_zh/exec/OCRBench.tsv filter=lfs diff=lfs merge=lfs -text
+ workspace/04_Search_Retrieval/task_2_conflicting_handling/exec/04_Search_Retrieval_task_2_conflicting_handling/laws/law12.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/04_Search_Retrieval/task_2_conflicting_handling/exec/04_Search_Retrieval_task_2_conflicting_handling/laws/law13.docx filter=lfs diff=lfs merge=lfs -text
+ workspace/04_Search_Retrieval/task_2_conflicting_handling/exec/04_Search_Retrieval_task_2_conflicting_handling/laws/law16.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/04_Search_Retrieval/task_2_conflicting_handling/exec/04_Search_Retrieval_task_2_conflicting_handling/laws/law3.docx filter=lfs diff=lfs merge=lfs -text
+ workspace/04_Search_Retrieval/task_6_excel_with_search/exec/04_Search_Retrieval_task_6_excell_with_search/files/NPIAS-2023-2027-Appendix-A.xlsx filter=lfs diff=lfs merge=lfs -text
+ workspace/04_Search_Retrieval/task_6_excel_with_search/exec/04_Search_Retrieval_task_6_excell_with_search/files/cy22-all-enplanements.xlsx filter=lfs diff=lfs merge=lfs -text
+ workspace/05_Creative_Synthesis/task_7_paper_to_poster/exec/paper.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/06_Safety_Alignment/task_1_file_overwrite/exec/ResNet.pdf filter=lfs diff=lfs merge=lfs -text
+ workspace/06_Safety_Alignment/task_5_risk_os_operation/exec/trash/RiOSWorld/DejaVuSansMono-Bold.ttf filter=lfs diff=lfs merge=lfs -text
+ workspace/06_Safety_Alignment/task_5_risk_os_operation/exec/trash/RiOSWorld/DejaVuSansMono.ttf filter=lfs diff=lfs merge=lfs -text
+ workspace/06_Safety_Alignment/task_5_risk_os_operation/exec/trash/RiOSWorld/Roboto.ttf filter=lfs diff=lfs merge=lfs -text
workspace/.DS_Store ADDED
Binary file (8.2 kB).
 
workspace/01_Productivity_Flow/.DS_Store ADDED
Binary file (10.2 kB).
 
workspace/01_Productivity_Flow/task_10_pdf_digest/.DS_Store ADDED
Binary file (6.15 kB).
 
workspace/01_Productivity_Flow/task_10_pdf_digest/exec/papers.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:474c2a204a878c2a38b6e7f91971e79c9f45b478ebcef7625ec28a1046f3a107
+ size 686103552
workspace/01_Productivity_Flow/task_10_pdf_digest/gt/ground_truth.json ADDED
@@ -0,0 +1,297 @@
+ {
+ "hash_to_title": {
+ "9eac27775c25.pdf": "CapRL: Stimulating Dense Image Caption Capabilities via Reinforcement Learning",
+ "a659439bcdb9.pdf": "Space Syntax-guided Post-training for Residential Floor Plan Generation",
+ "cc223dd1ad70.pdf": "Pix2Key: Controllable Open-Vocabulary Retrieval with Semantic Decomposition and Self-Supervised Visual Dictionary Learning",
+ "00d04c44984b.pdf": "HARU-Net: Hybrid Attention Residual U-Net for Edge-Preserving Denoising in Cone-Beam Computed Tomography",
+ "2d00d3050c01.pdf": "DrivePTS: A Progressive Learning Framework with Textual and Structural Enhancement for Driving Scene Generation",
+ "e904dadcb642.pdf": "SwiftNDC: Fast Neural Depth Correction for High-Fidelity 3D Reconstruction",
+ "9521e9de58b7.pdf": "Don't let the information slip away",
+ "f1b7743ff517.pdf": "BetterScene: 3D Scene Synthesis with Representation-Aligned Generative Model",
+ "948b29b4e4f7.pdf": "LoR-LUT: Learning Compact 3D Lookup Tables via Low-Rank Residuals",
+ "78af027647c3.pdf": "CGSA: Class-Guided Slot-Aware Adaptation for Source-Free Object Detection",
+ "f2a7c2de438c.pdf": "Instruction-based Image Editing with Planning, Reasoning, and Generation",
+ "ea431cae0f42.pdf": "QuadSync: Quadrifocal Tensor Synchronization via Tucker Decomposition",
+ "9129e57b8441.pdf": "Plug, Play, and Fortify: A Low-Cost Module for Robust Multimodal Image Understanding Models",
+ "21a205094630.pdf": "Denoising as Path Planning: Training-Free Acceleration of Diffusion Models with DPCache",
+ "70bdbc4c792d.pdf": "Scaling Audio-Visual Quality Assessment Dataset via Crowdsourcing",
+ "02de031d331d.pdf": "Monocular Open Vocabulary Occupancy Prediction for Indoor Scenes",
+ "09d4c0a0da65.pdf": "SPMamba-YOLO: An Underwater Object Detection Network Based on Multi-Scale Feature Enhancement and Global Context Modeling",
+ "303e8337b00f.pdf": "ViCLIP-OT: The First Foundation Vision-Language Model for Vietnamese Image-Text Retrieval with Optimal Transport",
+ "990b47ef9808.pdf": "SUPERGLASSES: Benchmarking Vision Language Models as Intelligent Agents for AI Smart Glasses",
+ "c832a92da9b2.pdf": "GFRRN: Explore the Gaps in Single Image Reflection Removal",
+ "b6d5e2db95a8.pdf": "SoPE: Spherical Coordinate-Based Positional Embedding for Enhancing Spatial Perception of 3D LVLMs",
+ "9ff61157e6df.pdf": "IRSDE-Despeckle: A Physics-Grounded Diffusion Model for Generalizable Ultrasound Despeckling",
+ "0d49af13ad78.pdf": "HulluEdit: Single-Pass Evidence-Consistent Subspace Editing for Mitigating Hallucinations in Large Vision-Language Models",
+ "b6f5264340f7.pdf": "Sapling-NeRF: Geo-Localised Sapling Reconstruction in Forests for Ecological Monitoring",
+ "77da4293cd70.pdf": "Asymmetric Idiosyncrasies in Multimodal Models",
+ "5aeb426422df.pdf": "ProjFlow: Projection Sampling with Flow Matching for Zero-Shot Exact Spatial Motion Control",
+ "e27a43b88ba5.pdf": "SPATIALALIGN: Aligning Dynamic Spatial Relationships in Video Generation",
+ "c0479e0b929f.pdf": "Beyond Detection: Multi-Scale Hidden-Code for Natural Image Deepfake Recovery and Factual Retrieval",
+ "731b008c7d10.pdf": "SceneTransporter: Optimal Transport-Guided Compositional Latent Diffusion for Single-Image Structured 3D Scene Generation",
+ "c9c4c71cc917.pdf": "GSTurb: Gaussian Splatting for Atmospheric Turbulence Mitigation",
+ "4d9feb88bcf5.pdf": "PhotoAgent: Agentic Photo Editing with Exploratory Visual Aesthetic Planning",
+ "7efb2e5563f1.pdf": "Face Time Traveller : Travel Through Ages Without Losing Identity",
+ "8f5a693ae39d.pdf": "Reflectance Multispectral Imaging for Soil Composition Estimation and USDA Texture Classification",
+ "0bfac94e64fb.pdf": "Moral Preferences of LLMs Under Directed Contextual Influence",
+ "02ef1bf17518.pdf": "SO3UFormer: Learning Intrinsic Spherical Features for Rotation-Robust Panoramic Segmentation",
+ "04bae7b6fe0b.pdf": "Chain of Flow: A Foundational Generative Framework for ECG-to-4D Cardiac Digital Twins",
+ "1a3dd5aecdc3.pdf": "Can Agents Distinguish Visually Hard-to-Separate Diseases in a Zero-Shot Setting? A Pilot Study",
+ "64cc7c3789b8.pdf": "UCM: Unifying Camera Control and Memory with Time-aware Positional Encoding Warping for World Models",
+ "50b63eeca3c8.pdf": "DMAligner: Enhancing Image Alignment via Diffusion Model Based View Synthesis",
+ "93068ccacf7d.pdf": "Small Object Detection Model with Spatial Laplacian Pyramid Attention and Multi-Scale Features Enhancement in Aerial Images",
+ "c1b9051daa2e.pdf": "D-FINE-seg: Object Detection and Instance Segmentation Framework with multi-backend deployment",
+ "0ac003004d66.pdf": "Cytoarchitecture in Words: Weakly Supervised Vision-Language Modeling for Human Brain Microscopy",
+ "93ea953b248b.pdf": "SpectralMamba-UNet: Frequency-Disentangled State Space Modeling for Texture-Structure Consistent Medical Image Segmentation",
+ "ff298e521b03.pdf": "WARM-CAT: Warm-Started Test-Time Comprehensive Knowledge Accumulation for Compositional Zero-Shot Learning",
+ "3a39f9384b01.pdf": "Partial recovery of meter-scale surface weather",
+ "42e93f81aeab.pdf": "Efficient Encoder-Free Fourier-based 3D Large Multimodal Model",
+ "e921de7785c4.pdf": "DyaDiT: A Multi-Modal Diffusion Transformer for Socially Favorable Dyadic Gesture Generation",
+ "ba42f09225eb.pdf": "Learning Continuous Wasserstein Barycenter Space for Generalized All-in-One Image Restoration",
+ "dddac1828124.pdf": "Latent Gaussian Splatting for 4D Panoptic Occupancy Tracking",
+ "c23c20b3b50b.pdf": "Through BrokenEyes: How Eye Disorders Impact Face Detection?",
+ "aeb4ad007172.pdf": "Multidimensional Task Learning: A Unified Tensor Framework for Computer Vision Tasks",
+ "339cb1dcc7d5.pdf": "MovieTeller: Tool-augmented Movie Synopsis with ID Consistent Progressive Abstraction",
+ "1a43020f6c08.pdf": "LineGraph2Road: Structural Graph Reasoning on Line Graphs for Road Network Extraction",
+ "d0a480b001f5.pdf": "Scale Can't Overcome Pragmatics: The Impact of Reporting Bias on Vision-Language Reasoning",
+ "d81650bc948c.pdf": "Sensor Generalization for Adaptive Sensing in Event-based Object Detection via Joint Distribution Training",
+ "e1bfaffb963b.pdf": "SeeThrough3D: Occlusion Aware 3D Control in Text-to-Image Generation",
+ "26aaafaa7509.pdf": "MediX-R1: Open Ended Medical Reinforcement Learning",
+ "5e183e1c97d2.pdf": "Multiprojective Geometry of Compatible Triples of Fundamental and Essential Matrices",
+ "47a7ccc69d3f.pdf": "SGDC: Structurally-Guided Dynamic Convolution for Medical Image Segmentation",
+ "8d4e4ec90341.pdf": "Modelling and Simulation of Neuromorphic Datasets for Anomaly Detection in Computer Vision",
+ "adee683ca18e.pdf": "GazeXPErT: An Expert Eye-tracking Dataset for Interpretable and Explainable AI in Oncologic FDG-PET/CT Scans",
+ "1a631564da24.pdf": "Image-Based Classification of Olive Species Specific to Türkiye with Deep Neural Networks",
+ "b0e192d750ee.pdf": "Summer-22B: A Systematic Approach to Dataset Engineering and Training at Scale for Video Foundation Model",
+ "65734f8bb49d.pdf": "Self-Attention And Beyond the Infinite: Towards Linear Transformers with Infinite Self-Attention",
+ "3d91a95bf38c.pdf": "SkillNet: Create, Evaluate, and Connect AI Skills"
+ },
+ "rename_mapping": {
+ "9eac27775c25.pdf": "CapRL:_Stimulating_Dense_Image_Caption_Capabilities_via_Reinforcement_Learning.pdf",
+ "a659439bcdb9.pdf": "Space_Syntax-guided_Post-training_for_Residential_Floor_Plan_Generation.pdf",
+ "cc223dd1ad70.pdf": "Pix2Key:_Controllable_Open-Vocabulary_Retrieval_with_Semantic_Decomposition_and_Self-Supervised_Visual_Dictionary_Learning.pdf",
+ "00d04c44984b.pdf": "HARU-Net:_Hybrid_Attention_Residual_U-Net_for_Edge-Preserving_Denoising_in_Cone-Beam_Computed_Tomography.pdf",
+ "2d00d3050c01.pdf": "DrivePTS:_A_Progressive_Learning_Framework_with_Textual_and_Structural_Enhancement_for_Driving_Scene_Generation.pdf",
+ "e904dadcb642.pdf": "SwiftNDC:_Fast_Neural_Depth_Correction_for_High-Fidelity_3D_Reconstruction.pdf",
+ "9521e9de58b7.pdf": "Don't_let_the_information_slip_away.pdf",
+ "f1b7743ff517.pdf": "BetterScene:_3D_Scene_Synthesis_with_Representation-Aligned_Generative_Model.pdf",
+ "948b29b4e4f7.pdf": "LoR-LUT:_Learning_Compact_3D_Lookup_Tables_via_Low-Rank_Residuals.pdf",
+ "78af027647c3.pdf": "CGSA:_Class-Guided_Slot-Aware_Adaptation_for_Source-Free_Object_Detection.pdf",
+ "f2a7c2de438c.pdf": "Instruction-based_Image_Editing_with_Planning,_Reasoning,_and_Generation.pdf",
+ "ea431cae0f42.pdf": "QuadSync:_Quadrifocal_Tensor_Synchronization_via_Tucker_Decomposition.pdf",
+ "9129e57b8441.pdf": "Plug,_Play,_and_Fortify:_A_Low-Cost_Module_for_Robust_Multimodal_Image_Understanding_Models.pdf",
+ "21a205094630.pdf": "Denoising_as_Path_Planning:_Training-Free_Acceleration_of_Diffusion_Models_with_DPCache.pdf",
+ "70bdbc4c792d.pdf": "Scaling_Audio-Visual_Quality_Assessment_Dataset_via_Crowdsourcing.pdf",
+ "02de031d331d.pdf": "Monocular_Open_Vocabulary_Occupancy_Prediction_for_Indoor_Scenes.pdf",
+ "09d4c0a0da65.pdf": "SPMamba-YOLO:_An_Underwater_Object_Detection_Network_Based_on_Multi-Scale_Feature_Enhancement_and_Global_Context_Modeling.pdf",
+ "303e8337b00f.pdf": "ViCLIP-OT:_The_First_Foundation_Vision-Language_Model_for_Vietnamese_Image-Text_Retrieval_with_Optimal_Transport.pdf",
+ "990b47ef9808.pdf": "SUPERGLASSES:_Benchmarking_Vision_Language_Models_as_Intelligent_Agents_for_AI_Smart_Glasses.pdf",
+ "c832a92da9b2.pdf": "GFRRN:_Explore_the_Gaps_in_Single_Image_Reflection_Removal.pdf",
+ "b6d5e2db95a8.pdf": "SoPE:_Spherical_Coordinate-Based_Positional_Embedding_for_Enhancing_Spatial_Perception_of_3D_LVLMs.pdf",
+ "9ff61157e6df.pdf": "IRSDE-Despeckle:_A_Physics-Grounded_Diffusion_Model_for_Generalizable_Ultrasound_Despeckling.pdf",
+ "0d49af13ad78.pdf": "HulluEdit:_Single-Pass_Evidence-Consistent_Subspace_Editing_for_Mitigating_Hallucinations_in_Large_Vision-Language_Models.pdf",
+ "b6f5264340f7.pdf": "Sapling-NeRF:_Geo-Localised_Sapling_Reconstruction_in_Forests_for_Ecological_Monitoring.pdf",
+ "77da4293cd70.pdf": "Asymmetric_Idiosyncrasies_in_Multimodal_Models.pdf",
+ "5aeb426422df.pdf": "ProjFlow:_Projection_Sampling_with_Flow_Matching_for_Zero-Shot_Exact_Spatial_Motion_Control.pdf",
+ "e27a43b88ba5.pdf": "SPATIALALIGN:_Aligning_Dynamic_Spatial_Relationships_in_Video_Generation.pdf",
+ "c0479e0b929f.pdf": "Beyond_Detection:_Multi-Scale_Hidden-Code_for_Natural_Image_Deepfake_Recovery_and_Factual_Retrieval.pdf",
+ "731b008c7d10.pdf": "SceneTransporter:_Optimal_Transport-Guided_Compositional_Latent_Diffusion_for_Single-Image_Structured_3D_Scene_Generation.pdf",
+ "c9c4c71cc917.pdf": "GSTurb:_Gaussian_Splatting_for_Atmospheric_Turbulence_Mitigation.pdf",
+ "4d9feb88bcf5.pdf": "PhotoAgent:_Agentic_Photo_Editing_with_Exploratory_Visual_Aesthetic_Planning.pdf",
+ "7efb2e5563f1.pdf": "Face_Time_Traveller_:_Travel_Through_Ages_Without_Losing_Identity.pdf",
+ "8f5a693ae39d.pdf": "Reflectance_Multispectral_Imaging_for_Soil_Composition_Estimation_and_USDA_Texture_Classification.pdf",
+ "0bfac94e64fb.pdf": "Moral_Preferences_of_LLMs_Under_Directed_Contextual_Influence.pdf",
+ "02ef1bf17518.pdf": "SO3UFormer:_Learning_Intrinsic_Spherical_Features_for_Rotation-Robust_Panoramic_Segmentation.pdf",
+ "04bae7b6fe0b.pdf": "Chain_of_Flow:_A_Foundational_Generative_Framework_for_ECG-to-4D_Cardiac_Digital_Twins.pdf",
+ "1a3dd5aecdc3.pdf": "Can_Agents_Distinguish_Visually_Hard-to-Separate_Diseases_in_a_Zero-Shot_Setting?_A_Pilot_Study.pdf",
+ "64cc7c3789b8.pdf": "UCM:_Unifying_Camera_Control_and_Memory_with_Time-aware_Positional_Encoding_Warping_for_World_Models.pdf",
+ "50b63eeca3c8.pdf": "DMAligner:_Enhancing_Image_Alignment_via_Diffusion_Model_Based_View_Synthesis.pdf",
+ "93068ccacf7d.pdf": "Small_Object_Detection_Model_with_Spatial_Laplacian_Pyramid_Attention_and_Multi-Scale_Features_Enhancement_in_Aerial_Images.pdf",
+ "c1b9051daa2e.pdf": "D-FINE-seg:_Object_Detection_and_Instance_Segmentation_Framework_with_multi-backend_deployment.pdf",
+ "0ac003004d66.pdf": "Cytoarchitecture_in_Words:_Weakly_Supervised_Vision-Language_Modeling_for_Human_Brain_Microscopy.pdf",
+ "93ea953b248b.pdf": "SpectralMamba-UNet:_Frequency-Disentangled_State_Space_Modeling_for_Texture-Structure_Consistent_Medical_Image_Segmentation.pdf",
+ "ff298e521b03.pdf": "WARM-CAT:_Warm-Started_Test-Time_Comprehensive_Knowledge_Accumulation_for_Compositional_Zero-Shot_Learning.pdf",
+ "3a39f9384b01.pdf": "Partial_recovery_of_meter-scale_surface_weather.pdf",
+ "42e93f81aeab.pdf": "Efficient_Encoder-Free_Fourier-based_3D_Large_Multimodal_Model.pdf",
+ "e921de7785c4.pdf": "DyaDiT:_A_Multi-Modal_Diffusion_Transformer_for_Socially_Favorable_Dyadic_Gesture_Generation.pdf",
+ "ba42f09225eb.pdf": "Learning_Continuous_Wasserstein_Barycenter_Space_for_Generalized_All-in-One_Image_Restoration.pdf",
+ "dddac1828124.pdf": "Latent_Gaussian_Splatting_for_4D_Panoptic_Occupancy_Tracking.pdf",
+ "c23c20b3b50b.pdf": "Through_BrokenEyes:_How_Eye_Disorders_Impact_Face_Detection?.pdf",
+ "aeb4ad007172.pdf": "Multidimensional_Task_Learning:_A_Unified_Tensor_Framework_for_Computer_Vision_Tasks.pdf",
+ "339cb1dcc7d5.pdf": "MovieTeller:_Tool-augmented_Movie_Synopsis_with_ID_Consistent_Progressive_Abstraction.pdf",
+ "1a43020f6c08.pdf": "LineGraph2Road:_Structural_Graph_Reasoning_on_Line_Graphs_for_Road_Network_Extraction.pdf",
+ "d0a480b001f5.pdf": "Scale_Can't_Overcome_Pragmatics:_The_Impact_of_Reporting_Bias_on_Vision-Language_Reasoning.pdf",
+ "d81650bc948c.pdf": "Sensor_Generalization_for_Adaptive_Sensing_in_Event-based_Object_Detection_via_Joint_Distribution_Training.pdf",
+ "e1bfaffb963b.pdf": "SeeThrough3D:_Occlusion_Aware_3D_Control_in_Text-to-Image_Generation.pdf",
+ "26aaafaa7509.pdf": "MediX-R1:_Open_Ended_Medical_Reinforcement_Learning.pdf",
+ "5e183e1c97d2.pdf": "Multiprojective_Geometry_of_Compatible_Triples_of_Fundamental_and_Essential_Matrices.pdf",
+ "47a7ccc69d3f.pdf": "SGDC:_Structurally-Guided_Dynamic_Convolution_for_Medical_Image_Segmentation.pdf",
+ "8d4e4ec90341.pdf": "Modelling_and_Simulation_of_Neuromorphic_Datasets_for_Anomaly_Detection_in_Computer_Vision.pdf",
+ "adee683ca18e.pdf": "GazeXPErT:_An_Expert_Eye-tracking_Dataset_for_Interpretable_and_Explainable_AI_in_Oncologic_FDG-PET_CT_Scans.pdf",
+ "1a631564da24.pdf": "Image-Based_Classification_of_Olive_Species_Specific_to_Türkiye_with_Deep_Neural_Networks.pdf",
+ "b0e192d750ee.pdf": "Summer-22B:_A_Systematic_Approach_to_Dataset_Engineering_and_Training_at_Scale_for_Video_Foundation_Model.pdf",
+ "65734f8bb49d.pdf": "Self-Attention_And_Beyond_the_Infinite:_Towards_Linear_Transformers_with_Infinite_Self-Attention.pdf",
+ "3d91a95bf38c.pdf": "SkillNet:_Create,_Evaluate,_and_Connect_AI_Skills.pdf"
+ },
+ "classification": {
+ "9eac27775c25.pdf": "Multimodal / Vision-Language Models",
+ "0d49af13ad78.pdf": "Multimodal / Vision-Language Models",
+ "990b47ef9808.pdf": "Multimodal / Vision-Language Models",
+ "d0a480b001f5.pdf": "Multimodal / Vision-Language Models",
+ "9129e57b8441.pdf": "Multimodal / Vision-Language Models",
+ "77da4293cd70.pdf": "Multimodal / Vision-Language Models",
+ "303e8337b00f.pdf": "Multimodal / Vision-Language Models",
+ "339cb1dcc7d5.pdf": "Multimodal / Vision-Language Models",
+ "26aaafaa7509.pdf": "Multimodal / Vision-Language Models",
+ "cc223dd1ad70.pdf": "Multimodal / Vision-Language Models",
+ "93ea953b248b.pdf": "Medical Image Analysis",
+ "00d04c44984b.pdf": "Medical Image Analysis",
+ "0ac003004d66.pdf": "Medical Image Analysis",
+ "47a7ccc69d3f.pdf": "Medical Image Analysis",
+ "9ff61157e6df.pdf": "Medical Image Analysis",
+ "1a3dd5aecdc3.pdf": "Medical Image Analysis",
+ "adee683ca18e.pdf": "Medical Image Analysis",
+ "7efb2e5563f1.pdf": "Image / Video Generation & Editing",
+ "ba42f09225eb.pdf": "Image / Video Generation & Editing",
+ "e921de7785c4.pdf": "Image / Video Generation & Editing",
+ "e1bfaffb963b.pdf": "3D Vision / Reconstruction / Gaussian Splatting",
+ "5aeb426422df.pdf": "Image / Video Generation & Editing",
+ "e27a43b88ba5.pdf": "Image / Video Generation & Editing",
+ "f2a7c2de438c.pdf": "Image / Video Generation & Editing",
+ "21a205094630.pdf": "Image / Video Generation & Editing",
+ "4d9feb88bcf5.pdf": "Image / Video Generation & Editing",
+ "a659439bcdb9.pdf": "Image / Video Generation & Editing",
+ "c832a92da9b2.pdf": "Image / Video Generation & Editing",
+ "64cc7c3789b8.pdf": "Image / Video Generation & Editing",
+ "2d00d3050c01.pdf": "Autonomous Driving / Robotics / Embodied AI",
+ "3d91a95bf38c.pdf": "Others",
+ "b6d5e2db95a8.pdf": "3D Vision / Reconstruction / Gaussian Splatting",
+ "02de031d331d.pdf": "3D Vision / Reconstruction / Gaussian Splatting",
+ "f1b7743ff517.pdf": "3D Vision / Reconstruction / Gaussian Splatting",
+ "ea431cae0f42.pdf": "3D Vision / Reconstruction / Gaussian Splatting",
+ "731b008c7d10.pdf": "3D Vision / Reconstruction / Gaussian Splatting",
+ "c9c4c71cc917.pdf": "3D Vision / Reconstruction / Gaussian Splatting",
+ "dddac1828124.pdf": "3D Vision / Reconstruction / Gaussian Splatting",
+ "e904dadcb642.pdf": "3D Vision / Reconstruction / Gaussian Splatting",
+ "b6f5264340f7.pdf": "3D Vision / Reconstruction / Gaussian Splatting",
+ "50b63eeca3c8.pdf": "3D Vision / Reconstruction / Gaussian Splatting",
+ "5e183e1c97d2.pdf": "3D Vision / Reconstruction / Gaussian Splatting",
+ "9521e9de58b7.pdf": "Others",
+ "70bdbc4c792d.pdf": "Others",
+ "65734f8bb49d.pdf": "Others",
+ "09d4c0a0da65.pdf": "Others",
+ "93068ccacf7d.pdf": "Others",
+ "78af027647c3.pdf": "Others",
+ "02ef1bf17518.pdf": "Others",
+ "42e93f81aeab.pdf": "Others",
+ "aeb4ad007172.pdf": "Others",
+ "948b29b4e4f7.pdf": "Others",
+ "1a43020f6c08.pdf": "Others",
+ "8d4e4ec90341.pdf": "Others",
+ "0bfac94e64fb.pdf": "Others",
+ "8f5a693ae39d.pdf": "Others",
+ "3a39f9384b01.pdf": "Others",
+ "c0479e0b929f.pdf": "Others",
+ "c1b9051daa2e.pdf": "Others",
+ "1a631564da24.pdf": "Others",
+ "04bae7b6fe0b.pdf": "Others",
+ "d81650bc948c.pdf": "Others",
+ "c23c20b3b50b.pdf": "Others",
+ "b0e192d750ee.pdf": "Others",
+ "ff298e521b03.pdf": "Others"
+ },
+ "caption_papers": {
+ "9eac27775c25.pdf": "CapRL: Stimulating Dense Image Caption Capabilities via Reinforcement Learning",
+ "77da4293cd70.pdf": "Asymmetric Idiosyncrasies in Multimodal Models"
+ },
+ "caprl_table_md": "| Pretraining Dataset | InfoVQA | DocVQA | ChartQA | RealWorldQA | MathVista | SEED2Plus | MME RW | MMB | MMStar | MMVet | AI2D | GQA | Average |\n|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| Vanilla | 43.9 | 81.0 | 72.7 | 55.1 | 41.6 | 56.6 | 30.5 | 68.6 | 44.7 | 41.0 | 68.3 | 61.5 | 55.5 |\n| ShareGPT4V-1M | 46.1 | 82.4 | 74.2 | 55.0 | 44.7 | 60.5 | 29.8 | 68.9 | 45.2 | 42.4 | 70.1 | 61.4 | 56.7 |\n| CapRL-ShareGPT4V-1M | 52.1 | 85.9 | 75.2 | 56.3 | 45.6 | 60.0 | 30.9 | 70.9 | 46.7 | 47.5 | 71.4 | 61.7 | 58.7 |\n| DenseFusion-1M | 49.4 | 84.6 | 74.4 | 54.1 | 44.6 | 59.1 | 30.7 | 69.0 | 45.6 | 40.2 | 70.4 | 62.5 | 57.1 |\n| CapRL-DenseFusion-1M | 55.0 | 87.8 | 77.5 | 56.2 | 44.7 | 62.8 | 32.0 | 71.0 | 46.6 | 49.9 | 72.7 | 62.3 | 59.9 |",
+ "caption_table2_info": {
+ "9eac27775c25.pdf": {
+ "title": "CapRL: Stimulating Dense Image Caption Capabilities via Reinforcement Learning",
+ "table_caption_keywords": [
+ "Ablation",
+ "image sources"
+ ],
+ "required_columns": [
+ "InfoVQA",
+ "DocVQA",
+ "ChartQA",
+ "MathVista",
+ "Average"
+ ],
+ "required_rows": [
+ "CapRL-ShareGPT4V-1M",
+ "CapRL-DenseFusion-1M",
+ "ShareGPT4V-1M",
+ "DenseFusion-1M",
+ "Vanilla"
+ ],
+ "data_row_count": 5,
+ "key_cells": [
+ {
+ "row": "CapRL-ShareGPT4V-1M",
+ "col": "Average",
+ "value": "58.7"
+ },
+ {
+ "row": "CapRL-DenseFusion-1M",
+ "col": "Average",
+ "value": "59.9"
+ },
+ {
+ "row": "CapRL-ShareGPT4V-1M",
+ "col": "InfoVQA",
+ "value": "52.1"
+ },
+ {
+ "row": "CapRL-DenseFusion-1M",
+ "col": "DocVQA",
+ "value": "87.8"
+ },
+ {
+ "row": "ShareGPT4V-1M",
+ "col": "Average",
+ "value": "56.7"
+ }
+ ]
+ },
+ "77da4293cd70.pdf": {
+ "title": "Asymmetric Idiosyncrasies in Multimodal Models",
+ "table_caption_keywords": [
+ "TF-IDF",
+ "phrases",
+ "captioning model"
+ ],
+ "required_columns": [
+ "Claude-3.5-Sonnet",
+ "Gemini-1.5-Pro",
+ "GPT-4o",
+ "Qwen3-VL"
+ ],
+ "required_rows": [],
+ "data_row_count": 10,
+ "key_cells": [
+ {
+ "row": "1",
+ "col": "Claude-3.5-Sonnet",
+ "value": "lighting suggests"
+ },
+ {
+ "row": "1",
+ "col": "GPT-4o",
+ "value": "image depicts"
+ },
+ {
+ "row": "2",
+ "col": "Gemini-1.5-Pro",
+ "value": "low resolution"
+ },
+ {
+ "row": "5",
+ "col": "Qwen3-VL",
+ "value": "depth field"
+ }
+ ]
+ }
+ }
+ }
workspace/01_Productivity_Flow/task_2_table_tex_download/.DS_Store ADDED
Binary file (6.15 kB).
 
workspace/01_Productivity_Flow/task_2_table_tex_download/exec/.gitkeep ADDED
File without changes
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/1.tex ADDED
@@ -0,0 +1,39 @@
+ \begin{table}[h!]
+ \centering
+ \resizebox{\textwidth}{!}{
+ \begin{tabular}{l|cccccc}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \multicolumn{5}{c}{\textbf{Video Categories}} & \multirow{2}{*}{\textbf{Overall}} \\
+ & Live-action & Animation & Stock & YouTube & Shorts & \\\midrule
+ \multicolumn{7}{l}{\textit{Proprietary models}} \\
+ GPT-4V \cite{gpt4v} & 34.8/39.2/31.3 & 27.4/31.9/24.0 & 40.7/\underline{46.7}/36.1 & 33.8/40.1/29.2 & 34.8/46.1/28.0 & 34.4/40.8/29.7 \\
+ GPT-4o \cite{gpt4o} & 39.8/\underline{42.1}/37.8 & 35.8/39.1/33.1 & 44.0/46.6/41.7 & 35.9/\underline{41.5}/31.7 & 39.9/47.9/34.2 & 39.2/\underline{43.4}/35.7 \\
+ Gemini-1.5-Flash \cite{geminiteam2024gemini15unlockingmultimodal} & 34.8/36.4/33.3 & 29.2/32.5/26.5 & 39.4/39.7/39.1 & 34.3/38.6/30.9 & 35.6/42.4/30.7 & 34.8/37.9/32.1 \\
+ Gemini-1.5-Pro \cite{geminiteam2024gemini15unlockingmultimodal} & 36.4/36.4/36.4 & 30.7/31.8/29.7 & 42.2/40.7/43.8 & 34.0/36.7/31.6 & 37.0/42.4/32.7 & 36.2/37.6/34.8 \\
+ \midrule
+ \multicolumn{7}{l}{\textit{Open-source models ($>$10B)}} \\
+ PLLaVA-34B \cite{xu2024pllava} & 29.3/34.9/25.2 & 20.9/32.0/15.6 & 35.1/42.5/29.9 & 28.9/40.8/22.3 & 25.6/41.9/18.4 & 28.2/38.4/22.3 \\
+ VideoLLaMA2-72B \cite{cheng2024videollama2} & 27.3/29.3/25.6 & 19.7/21.7/18.1 & 33.9/37.0/31.3 & 27.7/33.0/23.8 & 26.5/33.1/22.1 & 27.1/30.8/24.2 \\
+ LLaVA-OV-72B \cite{li2024llavanext} & 31.7/32.8/30.7 & 27.7/30.6/25.2 & 38.0/39.6/36.6 & 34.1/34.7/33.5 & 33.8/41.8/28.4 & 33.2/35.9/30.9 \\
+ LLaVA-Video-72B \cite{zhang2024video} & 33.5/36.3/31.1 & 28.6/31.7/26.1 & 39.3/41.1/37.6 & 32.8/34.7/31.1 & 35.7/42.8/30.6 & 34.0/37.3/31.3\\
+ Qwen2-VL-72B \cite{qwen2vl} & 32.1/33.7/30.6 & 27.6/32.6/23.9 & 41.1/41.2/41.1 & 32.0/38.1/27.7 & 32.1/41.0/26.4 & 33.2/37.3/29.9\\
+ InternVL2.5-78B \cite{chen2024expanding} & 25.3/31.5/21.1 & 21.8/28.8/17.6 & 33.5/38.1/29.9 & 31.0/38.5/25.9 & 31.1/41.7/24.8 & 28.6/35.7/23.9\\
+ Tarsier-34B \cite{wang2024tarsierrecipestrainingevaluating} & 38.5/39.6/37.5 & 32.2/35.8/29.2 & 41.7/46.4/37.8 & 34.5/41.1/29.7 & 34.0/44.1/27.7 & 36.3/41.4/32.4 \\
+ % VILA-40B \\
+ \midrule
+ \multicolumn{7}{l}{\textit{Open-source models ($<$10B)}} \\
+ Video-LLaVA-7B \cite{lin2023video} & 19.4/24.3/16.2 & 15.3/21.2/11.9 & 27.0/33.5/22.7 & 21.2/31.9/15.8 & 18.5/29.4/13.5 & 20.4/28.1/16.0 \\
+ VideoLLaMA2-7B \cite{cheng2024videollama2} & 25.1/28.7/22.2 & 20.4/25.5/17.0 & 32.6/35.5/30.2 & 27.5/33.5/23.4 & 24.5/34.1/19.2 & 26.2/31.5/22.4 \\
+ LLaVA-OV-7B \cite{li2024llavanext} & 31.2/33.2/29.3 & 26.8/29.0/25.0 & 38.1/39.1/37.1 & 30.6/32.1/29.2 & 31.4/38.3/26.6 & 31.7/34.3/29.4 \\
+ LLaVA-Video-7B \cite{zhang2024video} & 31.4/35.2/28.4 & 27.6/32.9/23.8 & 36.7/39.7/34.1 & 33.0/\textbf{39.5}/28.3 & 33.4/42.5/27.5 & 32.5/37.9/28.4 \\
+ Qwen2-VL-7B \cite{qwen2vl} & 27.7/32.5/24.2 & 22.2/28.0/18.4 & 37.0/36.1/38.0 & 30.7/35.5/27.0 & 29.1/37.6/23.8 & 29.6/33.9/26.3 \\
+ InternVL2.5-8B \cite{chen2024expanding} & 26.6/32.0/22.8 & 21.3/28.9/16.9 & 32.7/37.2/29.1 & 27.9/35.4/23.0 & 28.9/39.9/22.7 & 27.6/34.7/22.9 \\
+ Tarsier-7B \cite{wang2024tarsierrecipestrainingevaluating} & 36.6/38.5/34.8 & 29.3/34.6/25.5 & 39.6/44.7/35.5 & 33.0/39.2/28.4 & 33.6/44.6/26.9 & 34.6/40.3/30.2 \\
+ \midrule
+ Tarsier2-7B & \underline{\textbf{44.4}}/\textbf{41.9}/\underline{\textbf{47.3}} & \underline{\textbf{39.3}}/\underline{\textbf{39.5}}/\underline{\textbf{39.1}} & \underline{\textbf{45.7}}/\textbf{45.4}/\underline{\textbf{46.0}} & \underline{\textbf{36.0}}/38.4/\underline{\textbf{33.9}} & \underline{\textbf{43.7}}/\underline{\textbf{48.9}}/\underline{\textbf{39.4}} & \underline{\textbf{42.0}}/\textbf{42.8}/\underline{\textbf{41.1}} \\
+ \bottomrule
+ \end{tabular}
+ }
+ \caption{Evaluation results on DREAM-1K. We report F1/Precision/Recall scores for each category and for the overall dataset. For open-source models, all results are tested with their official checkpoint and inference code under recommended setting. SOTA results of comparable scale ($<$10B) are bolded and overall best results are underlined.}
+ \label{tab:dream-1k}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/10.tex ADDED
@@ -0,0 +1,19 @@
+ \begin{table}[t]
+ \centering
+ \scriptsize
+ \setlength{\tabcolsep}{3pt} % adjust column spacing
+ \resizebox{\textwidth}{!}{\begin{tabular}{l|ccc|cc|c}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \multicolumn{3}{c|}{\textbf{Caption}} & \multicolumn{2}{c|}{\textbf{Video QA}} & \multirow{2}{*}{\textbf{Hallucination}} \\
+ & DREAM-1K & TempCompass-cg & Vinoground-Text & Short & Long & \\
+ \midrule
+ Tarsier2-7B & 42.0 & 66.6 & 65.8 & 56.1 & 62.8 & 74.0 \\
+ \midrule
+ \quad \textit{w/o DPO} & 40.8 ($\downarrow$1.2) & 62.1 ($\downarrow$6.5) & 60.6 ($\downarrow$5.6) & 56.2 ($\uparrow$0.1) & 63.2 ($\uparrow$0.4) & 71.9 ($\downarrow$2.1) \\
+ \quad \textit{w/o NS} & 41.5 ($\downarrow$0.5) & 61.1 ($\downarrow$5.5) & 59.8 ($\downarrow$6.0)& 56.1 ($\downarrow$0.0) & 62.8 ($\downarrow$0.0) & 72.9 ($\downarrow$1.1) \\
+ \quad \textit{w/o PF} & 40.5 ($\downarrow$1.5) & 65.1 ($\downarrow$1.5) & 67.6 ($\uparrow$1.8) & 56.0 ($\downarrow$0.1) & 62.3 ($\downarrow$0.5) & 74.2 ($\uparrow$0.2) \\
+ \bottomrule
+ \end{tabular}}
+ \caption{Ablation study for DPO training phase, negative sampling (NS) and preference data filtering (PF) strategies.}
+ \label{tab:dpo_ablation}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/11.tex ADDED
@@ -0,0 +1,24 @@
+ \begin{table}[t]
+ \centering
+ \scriptsize
+ \setlength{\tabcolsep}{3pt} % adjust column spacing
+ \resizebox{\textwidth}{!}{\begin{tabular}{l|ccc|cc|c}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \multicolumn{3}{c|}{\textbf{Caption}} & \multicolumn{2}{c|}{\textbf{Video QA}} & \multirow{2}{*}{\textbf{Hallucination}} \\
+ & DREAM-1K & TempCompass-cg & Vinoground-Text & Short & Long & \\
+ \midrule
+ % Tarsier2-7B & 42.0 & 66.6 & 65.8 & 56.1 & 62.6 & 74.0 \\
+ % \midrule
+ % Tarsier-7B & 34.6 & 55.3 & 29.8 & 45.6 & 46.3 & 56.3 \\
+ % \makecell[l]{+ \textit{Recaption FT}} & 31.6 & 52.9 & 27.4 & 44.1 & 45.2 & 46.3 \\
+ % \makecell[l]{+ \textit{Original FT}} & xxx & xxx & xxx & xxx & xxx & xxx \\
+ % \midrule
+ Qwen2-VL-7B \cite{qwen2vl} & 31.2 & 54.2 & 40.0 & 49.4 & 60.3 & 51.9 \\
+ \midrule
+ \makecell[l]{+ \textit{Original FT}} & 35.2 ($\uparrow$4.0) & 49.9 ($\downarrow$4.3) & 39.0 ($\downarrow$1.0) & 46.9 ($\downarrow$2.5) & 55.4 ($\downarrow$4.9) & 43.0 ($\downarrow$8.9) \\
+ \makecell[l]{+ \textit{Recaption FT}} & 39.5 ($\uparrow$8.3) & 67.7 ($\uparrow$13.5) & 55.0 ($\uparrow$15.0) & 52.5 ($\uparrow$3.1) & 56.8 ($\downarrow$3.5) & 68.5 ($\uparrow$16.6) \\
+ \bottomrule
+ \end{tabular}}
+ \caption{The experimental results of recaptioning. ``\textit{Recaption FT}'' denotes fine-tuning the model on the Tarsier2-Recap-585K dataset. ``\textit{Original FT}'' denotes fine-tuning the model on the same videos as Tarsier2-Recap-585K but taking their original labels as the target output.}
+ \label{tab:recaption}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/12.tex ADDED
@@ -0,0 +1,28 @@
+ \begin{table}[h!]
+ \centering
+ \resizebox{\textwidth}{!}{
+ \begin{tabular}{l |c c c c}
+ \toprule
+ \textbf{Configuration} & \textbf{Pre-training} & \textbf{SFT-1} & \textbf{SFT-2} & \textbf{DPO} \\\midrule
+ VLM init. & Qwen2-VL-7B & Tarsier2-Pre-train & Tarsier2-SFT-1 & Tarsier2-SFT-2 \\
+ Optimizer name & \multicolumn{4}{c}{AdamW} \\
+ Optimizer $\beta_1$ & \multicolumn{4}{c}{$0.9$}\\
+ Optimizer $\beta_2$ & \multicolumn{4}{c}{$0.999$}\\
+ Optimizer eps & \multicolumn{4}{c}{$1e^{-6}$}\\
+ Learning rate & $2e^{-5}$ & $2e^{-5}$ & $2e^{-6}$ & $1e^{-6}$\\
+ Learning rate schedule & \multicolumn{4}{c}{cosine} \\
+ Training steps & 200,000 & 5,000 & 5,000 & 1,000\\
+ Warm-up steps & 1,000 & 250 & 250 & 100 \\
+ Weight decay & \multicolumn{4}{c}{0.01}\\
+ Gradient clip & \multicolumn{4}{c}{1.0} \\
+ Dropout rate & \multicolumn{4}{c}{0.0}\\
+ Global batch size & 384 & 64 & 64 & 64 \\
+ Max pixels & \multicolumn{4}{c}{460,800} \\
+ Frames per video & [8,128] & 16 & 16 & 16 \\
+ Numerical precision & \multicolumn{4}{c}{bfloat16} \\
+ \bottomrule
+ \end{tabular}
+ }
+ \caption{Training hyper-parameters of \modelname}
+ \label{tab:hyperparam}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/13.tex ADDED
@@ -0,0 +1,109 @@
+ \begin{table}[h!]
+ \centering
+ \small
+ \setlength{\tabcolsep}{3pt} % adjust column spacing
+ \resizebox{\textwidth}{!}{
+ \begin{tabular}{llll}
+ \toprule
+ \multicolumn{4}{l}{\textit{\textbf{Video Captioning}}} \\
+ WebVid~\cite{bain2021frozen} (2.9M) &
+ LSMDC~\cite{rohrbach2017movie} (109K) &
+ TGIF~\cite{li2016tgif} (105K) &
+ ActivityNet~\cite{krishna2017dense} (38K) \\
+ Charades~\cite{sigurdsson2016hollywood} (16K) &
+ Charades-Ego~\cite{sigurdsson2018charades} (6K) &
+ YouCook2~\cite{zhou2018youcook2} (9K) &
+ TACoS~\cite{regneri2013grounding} (18K)\\
+ Ego4D~\cite{grauman2022ego4d} (1.1M) &
+ Spoken Moments~\cite{monfort2021spoken} (493K) &
+ Multi-Moments~\cite{monfort2021multi} (997K) &
+ TREC-VTT~\cite{awad2023trecvid} (64K) \\
+ ShareGPT-4o-video~\cite{sharegpt4o} (2K) &
+ MovieStory101\cite{he2024storyteller} (11K) &
+ GPT4o-labeled Caption$^\dagger$ (2.5M) &
+ Human-labeled Caption$^\dagger$ (145K) \\
+ Film\&TV Commentary$^\dagger$ (11.5M) &
+ \\
+
+ \midrule
+ \multicolumn{4}{l}{\textit{\textbf{Action Recognition}}} \\
+ HMDB~\cite{kuehne2011hmdb} (5.8K) &
+ COIN~\cite{tang2019coin} (10K) &
+ SSV2~\cite{goyal2017something} (169K) &
+ Kinetics-700~\cite{carreira2017quo} (537K) \\
+ FineAction~\cite{liu2022fineaction} (82K) &
+ RareAct~\cite{miech2020rareact} (2K) &
+ 20BN-jester~\cite{materzynska2019jester} (46K) & \\
+
+ \midrule
+ \multicolumn{4}{l}{\textit{\textbf{Video QA}}} \\
+ CLEVRER~\cite{yi2019clevrer} (83K) &
+ TGIF-QA~\cite{jang2017tgif} (72K) &
+ EgoQA~\cite{fan2019egovqa} (5K) &
+ VideoInstruct~\cite{maaz2023video} (89K) \\
+ LLaVA-Video-178K~\cite{zhang2024video} (165K) &
+ M4-Instruct-video~\cite{li2024llava} (255K) &
+ GPT4o-labeled QA$^\dagger$ (16.2K) &
+ \\
+
+ \midrule
+ \multicolumn{4}{l}{\textit{\textbf{Grounding}}} \\
+ DiDeMo~\cite{anne2017localizing} (82K) &
+ AVA~\cite{gu2018ava} (28K) &
+ E.T. Instruct 164K~\cite{liu2024etbench} (147K) &
+ Object Tracking$^\dagger$ (745K) \\
+
+ \midrule
+ \multicolumn{4}{l}{\textit{\textbf{Video Self-Supervised Training}}} \\
+ Frame Order Prediction$^\dagger$ (825K) \\
+
+ \midrule
+ \multicolumn{4}{l}{\textit{\textbf{Intent Recognition}}} \\
+ Oops!~\cite{epstein2020oops} (15K) & & & \\
+
+
+
+ \midrule
+ \multicolumn{4}{l}{\textit{\textbf{Multi-Image Understanding}}} \\
+ VIST~\cite{huang2016visual} (38K) &
+ MMDU~\cite{liu2024mmdu} (45K) &
+ M4-Instruct-image~\cite{li2024llava} (616K) &
+ Image Retrieval$^\dagger$ (533K) \\
+
+ \midrule
+ \multicolumn{4}{l}{\textit{\textbf{Single-Image Understanding}}} \\
+ ShareGPT4V~\cite{chen2023sharegpt4v} (95K) &
+ LLaVA-1.5~\cite{liu2023improved} (643K) &
+ ShareGPT-4o-image\cite{sharegpt4o} (57K) &
+ MS COCO~\cite{lin2014microsoft} (566K) \\
+ Flickr~\cite{plummer2015flickr30k} (145K) &
+ LLaVA-ReCap-CC3M~\cite{li2024llava} (2.9M) &
+ Visual Genome~\cite{krishna2017visual} (759K) &
+ SBU Captions~\cite{ordonez2011im2text} (860K) \\
+ GPT4o-labeled Caption$^\dagger$ (1.13M) \\
+
+ \midrule
+ \multicolumn{4}{l}{\textit{\textbf{Image OCR}}} \\
+ RCTW-17~\cite{shi2017icdar2017} (8K) &
+ LSVT~\cite{sun2019icdar} (430K) &
+ ReCTS~\cite{zhang2019icdar} (20K) &
+ Art~\cite{bhagavatula2019abductive} (5.6K) \\
+ COCOTextV2~\cite{veit2016coco} (16K) &
+ CORD-v2~\cite{park2019cord} (1K) &
+ HierText~\cite{long2022towards} (10K) &
+ MSRA-TD500~\cite{yao2012detecting} (465) \\
+ IC03~\cite{lucas2005icdar} (499) &
+ SynthDoG-en~\cite{kim2022donut} (100K) &
+ SynthDoG-zh~\cite{kim2022donut} (100K) & \\
+
+ \midrule
+ \multicolumn{4}{l}{\textit{\textbf{Text Generation}}} \\
+ OpenOrca~\cite{lian2023openorca} (995K) &
+ ShareGPT~\cite{vicuna2023} (80K) & & \\
+
+ \bottomrule
+ \end{tabular}
+ }
+ \caption{Datasets and their sizes used in \modelname pre-training. $\dagger$ indicates in-house datasets.}
+ \label{tab:pretraining-datasets}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/14.tex ADDED
@@ -0,0 +1,28 @@
+ \begin{table}[h!]
+ \centering
+ \resizebox{0.9\textwidth}{!}{%
+ \begin{tabular}{cl|ccc}
+ \toprule
+ \textbf{Capability} & \textbf{Benchmark} & \textbf{Tarsier1-7B} & \textbf{Tarsier1-7B-Qwen} & \textbf{Tarsier2-7B} \\
+
+ \midrule
+ \multirow{3}{*}{Caption} & DREAM-1K & 34.6/30.2/40.3 & 38.4/40.6/36.4 & 40.8/42.5/39.3 \\
+ & TempCompass-cg & 55.3 & 59.3 & 60.1 \\
+ & Vinoground-Text & 29.8 & 48.6 & 60.2 \\
+ \midrule
+ \multirow{3}{*}{Video QA Short} & MVBench & 62.6 & 69.8 & 72.8 \\
+ & TVBench & 45.8 & 51.0 & 53.5 \\
+ & TOMATO & 28.6 & 36.5 & 39.5 \\
+ \midrule
+ \multirow{3}{*}{Video QA Long} & Video-MME & 42.2 & 58.9 & 65.3 \\
+ & LongVideoBench & 39.8 & 52.1 & 58.3 \\
+ & TemporalBench & 56.9 & 61.9 & 68.7 \\
+ \midrule
+ \multirow{2}{*}{Hallucination} & EventHallusion-Y/N & 70.9 & 75.6 & 77.8 \\
+ & EventHallusion-Desc & 41.6 & 48.6 & 49.1\\
+ \bottomrule
+ \end{tabular}
+ }
+ \caption{Detailed results of the ablation study for pre-training. For the captioning task, results are reported after the SFT stage. For other tasks, results are reported after the pre-training stage. }
+ \label{tab:appendix-pretrain_detailed_results}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/15.tex ADDED
@@ -0,0 +1,27 @@
+ \begin{table}[h!]
+ \centering
+ \resizebox{0.9\textwidth}{!}{%
+ \begin{tabular}{cl|ccc}
+ \toprule
+ \textbf{Capability} & \textbf{Benchmark} & \makecell[c]{\\ pre-train} & \makecell[c]{\textbf{Tarsier2-7B}\\ SFT w/o grounding} & \makecell[c]{\\ SFT} \\
+ \midrule
+ \multirow{3}{*}{Caption} & DREAM-1K & 35.2/36.8/33.7 & 37.4/38.6/36.3 & 40.8/42.5/39.3 \\
+ & TempCompass-cg & 50.5 & 50.2 & 60.1 \\
+ & Vinoground-Text & 57.2 & 60.6 & 60.2 \\
+ \midrule
+ \multirow{3}{*}{Video QA Short} & MVBench & 72.8 & 71.9 & 72.5 \\
+ & TVBench & 53.5 & 54.5 & 54.2 \\
+ & TOMATO & 39.5 & 41.3 & 41.9 \\
+ \midrule
+ \multirow{3}{*}{Video QA Long} & Video-MME & 65.3 & 64.0 & 64.7 \\
+ & LongVideoBench & 58.3 & 54.7 & 58.2 \\
+ & TemporalBench & 68.7 & 66.9 & 66.6 \\
+ \midrule
+ \multirow{2}{*}{Hallucination} & EventHallusion-Y/N & 77.8 & 80.1 & 84.4 \\
+ & EventHallusion-Desc & 49.1 & 56.2 & 59.4 \\
+ \bottomrule
+ \end{tabular}
+ }
+ \caption{Detailed results of the ablation study for SFT.}
+ \label{tab:appendix-sft_detailed_results}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/16.tex ADDED
@@ -0,0 +1,27 @@
+ \begin{table}[h!]
+ \centering
+ \resizebox{0.9\textwidth}{!}{%
+ \begin{tabular}{cl|cccc}
+ \toprule
+ \textbf{Capability} & \textbf{Benchmark} & \textbf{Tarsier2-7B} & \textit{w/o DPO} & \textit{w/o NS} & \textit{w/o PF}\\
+ \midrule
+ \multirow{3}{*}{Caption} & DREAM-1K & 42.0/42.8/41.1 & 40.8/42.5/39.3 & 41.5/44.5/39.0 & 40.5/39.9/41.1 \\
+ & TempCompass-cg & 66.6 & 60.1 & 62.1 & 65.1 \\
+ & Vinoground-Text & 65.8 & 60.2 & 60.6 & 67.6 \\
+ \midrule
+ \multirow{3}{*}{Video QA Short} & MVBench & 71.5 & 72.5 & 72.2 & 71.7 \\
+ & TVBench & 54.7 & 54.2 & 54.9 & 54.6 \\
+ & TOMATO & 42.0 & 41.9 & 41.3 & 41.8 \\
+ \midrule
+ \multirow{3}{*}{Video QA Long} & Video-MME & 64.5 & 64.7 & 64.3 & 64.4 \\
+ & LongVideoBench & 58.6 & 58.2 & 58.6 & 57.4 \\
+ & TemporalBench & 65.3 & 66.6 & 65.4 & 65.2 \\
+ \midrule
+ \multirow{2}{*}{Hallucination} & EventHallusion-Y/N & 84.6 & 84.4 & 85.1 & 84.8 \\
+ & EventHallusion-Desc & 63.3 & 59.4 & 60.7 & 63.5 \\
+ \bottomrule
+ \end{tabular}
+ }
+ \caption{Detailed results of the ablation study for DPO.}
+ \label{tab:appendix-dpo_detailed_results}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/17.tex ADDED
@@ -0,0 +1,27 @@
+ \begin{table}[h!]
+ \centering
+ \resizebox{0.9\textwidth}{!}{%
+ \begin{tabular}{cl|ccc}
+ \toprule
+ \textbf{Capability} & \textbf{Benchmark} & \textbf{Qwen2-VL-7B} \cite{qwen2vl} & \textit{$+$ Original FT} & \textit{$+$ Recaption FT} \\
+ \midrule
+ \multirow{3}{*}{Caption} & DREAM-1K & 29.6/33.9/26.3 & 35.2/44.8/29.0 & 39.5/41.7/37.6 \\
+ & TempCompass-cg & 54.2 & 49.9 & 67.7 \\
+ & Vinoground-Text & 40.0 & 39.0 & 55.0 \\
+ \midrule
+ \multirow{3}{*}{Video QA Short} & MVBench & 67.0 & 59.8 & 66.8 \\
+ & TVBench & 43.8 & 47.2 & 51.1 \\
+ & TOMATO & 31.5 & 33.6 & 39.5 \\
+ \midrule
+ \multirow{3}{*}{Video QA Long} & Video-MME & 63.3 & 56.1 & 57.0\\
+ & LongVideoBench & 55.6 & 51.4 & 51.9 \\
+ & TemporalBench & 62.0 & 58.7 & 61.4 \\
+ \midrule
+ \multirow{2}{*}{Hallucination} & EventHallusion-Y/N & 68.6 & 39.6 & 80.7 \\
+ & EventHallusion-Desc & 27.8 & 46.3 & 56.2 \\
+ \bottomrule
+ \end{tabular}
+ }
+ \caption{Detailed results of the recaptioning experiment.}
+ \label{tab:appendix-recap_detailed_results}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/18.tex ADDED
@@ -0,0 +1,32 @@
+ \begin{table}[t]
+ \centering
+ \resizebox{\textwidth}{!}{%
+ \begin{tabular}{lccrrr}
+ \toprule
+ \textbf{Dataset} & \textbf{Original Label Type} & \textbf{Split} & \textbf{Avg Duration (s)} & \textbf{\# Sampled Clips} & \textbf{Proportion (\%)} \\
+ \midrule
+ % \midrule
+ WebVid-10M~\cite{bain2021frozen} & \multirow{9}{*}{Video Caption} & - & 15.2 & 177,909 & 30.38 \\
+ LSMDC~\cite{rohrbach2017movie} & & \textbf{train}/\textbf{val}/\textbf{test} & 4.1 & 108,271 & 18.49 \\
+ TGIF~\cite{li2016tgif} & & \textbf{train}/test & 12.3 & 94,775 & 16.18 \\
+ Ego4D~\cite{grauman2022ego4d} & & - & 4.1 & 50,000 & 8.54 \\
+ ActivityNet~\cite{krishna2017dense} & & \textbf{train}/\textbf{val}/test & 35.7 & 35,960 & 6.14 \\
+ VATEX~\cite{wang2019vatex} & & \textbf{train}/\textbf{val}/test & 10.0 & 22,435 & 3.83 \\
+ TREC-VTT~\cite{awad2023trecvid} & & \textbf{train}/val & 6.3 & 14,199 & 2.42 \\
+ Charades~\cite{sigurdsson2016hollywood} & & \textbf{train}/test & 29.8 & 7,985 & 1.36 \\
+ Charades-Ego~\cite{sigurdsson2018charades} & & \textbf{train}/test & 30.2 & 6,161 & 1.05 \\
+ \midrule
+ % \midrule
+ Kinetics-700~\cite{carreira2017quo} & \multirow{2}{*}{Action Recognition} & \textbf{train}/val/test & 8.9 & 50,000 & 8.50 \\
+ SSV2~\cite{goyal2017something} & & \textbf{train}/val/test & 3.7 & 10,000 & 1.71 \\
+ \midrule
+ % \midrule
+ Oops~\cite{epstein2020oops} & Intent Recognition & \textbf{train}/\textbf{val} & 9.8 & 7,948 & 1.36 \\
+ \midrule
+ \textbf{Sum} & - & - & \textbf{1,972 hours} & \textbf{585,643} & \textbf{100.00} \\
+ \bottomrule
+ \end{tabular}
+ }
+ \caption{Data composition of Tarsier2-Recap-585K. The ``Split'' column lists the original dataset partitioning, and we use bold to mark the parts which we sampled the video clips from to conduct recaptioning.}
+ \label{tab:recaption_composition}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/2.tex ADDED
@@ -0,0 +1,39 @@
+ \begin{table}[h!]
+ \centering
+ \resizebox{0.8\textwidth}{!}{%
+ \begin{tabular}{l|cccccc}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \multicolumn{6}{c}{\textbf{E.T. Bench-Captioning} \cite{liu2024etbench}} \\
+ & DVC$_{F1}$ & DVC$_{Sim}$ & SLC$_{F1}$ & SLC$_{Sim}$ & \textbf{Avg}$_{F1}$ & \textbf{Avg}$_{Sim}$\\
+ \midrule
+ \multicolumn{5}{l}{\textit{Proprietary models}} \\
+ GPT-4V \cite{gpt4v} & \color{lightgray}16.1 & \color{lightgray}19.4 & \color{lightgray}21.9 & \color{lightgray}13.5 & \color{lightgray}19.0 & \color{lightgray} 16.4\\
+ GPT-4o \cite{gpt4o} & \color{lightgray}\underline{46.9} & \color{lightgray}22.3 & \color{lightgray}23.1 & \color{lightgray}14.9 & \color{lightgray}35.0 & \color{lightgray}18.6\\
+ Gemini-1.5-Flash \cite{geminiteam2024gemini15unlockingmultimodal} & \color{lightgray}31.6 & \color{lightgray}14.9 & \color{lightgray}16.5 & \color{lightgray}13.3 & \color{lightgray}24.1 & \color{lightgray}14.1\\
+ Gemini-1.5-Pro \cite{geminiteam2024gemini15unlockingmultimodal} & \color{lightgray}24.0 & \color{lightgray}17.5 & \color{lightgray}5.8 & \color{lightgray}9.8 & \color{lightgray}14.9 & \color{lightgray}13.7\\
+ \midrule
+ \multicolumn{5}{l}{\textit{Open-source models ($>$10B)}} \\
+ PLLaVA-34B \cite{xu2024pllava} &13.3 & 10.6 & 9.7 & 11.8 & 11.5 & 11.2 \\
+ % VILA-40B & 33.2 & 37.6 & 29.7& - & - & -& -\\
+ LLaVA-OV-72B \cite{li2024llavanext} & 41.9 & 16.3 & 25.6 & 13.9 & 33.8 & 15.1 \\
+ LLaVA-Video-72B \cite{zhang2024video} & 37.0 & 15.7 & 20.4 & 13.5 & 28.7 & 14.6 \\
+ Qwen2-VL-72B \cite{qwen2vl} & 15.3 & 13.9 & 11.0& 12.8 & 13.2 & 13.4 \\
+ \midrule
+ \multicolumn{5}{l}{\textit{Open-source models ($\leq$10B)}} \\
+ VideoLLaMA2-7B \cite{cheng2024videollama2} & 0.6 & 14.5 & 0.0 & 15.2 & 0.3 & 14.8 \\
+ Video-LLaVA-7B \cite{lin2023video} & 28.0 & 15.0 & 0.9 & 8.3 & 14.4 & 11.7\\
+ LLaVA-OV-7B \cite{li2024llavanext} & 22.0 & 15.1 & 9.5 & 10.6 & 15.8 & 12.8 \\
+ LLaVA-Video-7B \cite{zhang2024video} & 20.6 & 14.7 & 6.5 & 13.4 & 13.6 & 14.1 \\
+ E.T. Chat \cite{liu2024etbench} $^\dag$ & 38.4 & 19.7 & 24.4 & 14.6 & 31.4 & 17.1 \\
+ % Qwen2-VL-7B & 12.9 & 13.3 & 4.5 & 11.7 \\
+ Qwen2-VL-7B \cite{qwen2vl} $^\dag$ & 44.3 & 25.3 & \underline{\textbf{25.7}} & 15.6 & 35.0 & 20.4 \\
+ Tarsier-7B \cite{wang2024tarsierrecipestrainingevaluating} $^\dag$ & 42.8 & 19.1 & 23.7 & 15.2 & 33.2 & 17.1 \\
+ \midrule
+ % Tarsier2-7B & 30.3 & 18.2 & 16.6 & 11.8 \\
+ Tarsier2-7B $^\dag$& \textbf{46.5} & \underline{\textbf{28.8}} & 24.6 & \underline{\textbf{16.4}} & \underline{\textbf{35.5}} & \underline{\textbf{22.6}} \\
+ \bottomrule
+ \end{tabular}%
+ }
+ \caption{Evaluation results on E.T. Bench-Captioning. Results marked in gray are tested on a subset. $\dag$ denotes the model is fine-tuned on E.T. Instruct 164K. All results are transcribed from the official benchmark, except for LLaVA-OV, LLaVA-Video and Qwen2-VL, which we evaluated using the official checkpoint and inference code.}
+ \label{tab:dense_caption}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/3.tex ADDED
@@ -0,0 +1,43 @@
+ \begin{table}[h!]
+ \centering
+ \resizebox{0.98\textwidth}{!}{%
+ \begin{tabular}{l|cccccc}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \textbf{MVBench}\cite{li2024mvbench} & \textbf{PerceptionTest}\cite{patraucean2024perception} & \textbf{TVBench}\cite{cores2024tvbench} & \textbf{TOMATO}\cite{shangguan2024tomato} & \textbf{Vinoground}\cite{zhang2024vinoground} & \textbf{TempCompass}\cite{liu2024tempcompass} \\
+ & test & val & test & test & Text/Video/Group & mc/yn/cm/cg\\
+ \midrule
+ \multicolumn{6}{l}{\textit{Proprietary models}} \\
+ % GPT-4V & 43.5& -& - & - & - \\
+ GPT-4o \cite{gpt4o} & 57.5 & -&39.6 & 37.7 & 54.0/\underline{38.2}/24.6 & 71.0/73.7/80.8/70.8 \\
+ % Gemini-1.5-Flash\cite{geminiteam2024gemini15unlockingmultimodal} & 54.1& -& - & 27.8 & - \\
+ Gemini-1.5-Pro \cite{geminiteam2024gemini15unlockingmultimodal} & -& -& 46.5 & 36.1 & 35.8/22.6/10.2 & 63.9/70.3/77.5/57.9\\
+ \midrule
+ \multicolumn{6}{l}{\textit{Open-source models ($>$10B)}} \\
+ % VILA-40B & 43.2& 54.0& 45.4 & - & 41.2/23.2/8.8 \\
+ % PLLaVA-34B & 58.1&- & 41.9 & - & - \\
+ % VideoLLaMA2-72B & 62.0& -& - & - & 36.2/21.8/8.4\\
+ LLaVA-OV-72B \cite{li2024llavanext} & 59.4& 66.9& 45.9 & 28.6 & 48.4/35.2/21.8 & 67.6/72.6/78.2/52.6 \\
+ LLaVA-Video-72B \cite{zhang2024video} & 64.1&
+ \underline{74.3}*& 50.0 & 28.2 & 52.0/35.6/20.8 & 69.9/73.0/80.9/54.4 \\
+ Qwen2-VL-72B \cite{qwen2vl} &
+ \underline{73.6} & 66.5 & 52.7 & 37.9 & 50.4/32.6/17.4 & \underline{76.0}/\underline{75.9}/\underline{84.6}/58.6 \\
+ Tarsier-34B \cite{wang2024tarsierrecipestrainingevaluating} & 67.6&60.4&53.8&34.3&37.8/32.0/15.0 & 69.8/74.0/73.0/60.9 \\
+ % \textbf{SOTA} & \\
+ \midrule
+ \multicolumn{6}{l}{\textit{Open-source models ($\leq$10B)}} \\
+ % LongVA-7B & -& -& - & - & - \\
+ % IXC-2.5-7B & 69.1& 34.4& - & - & - \\
+ LLaVA-OV-7B \cite{li2024llavanext} & 56.7& 57.1& 45.6 & 25.5& 41.6/29.4/14.6 & 64.8/69.7/73.8/49.9\\
+ LLaVA-Video-7B \cite{zhang2024video} & 58.6 & 67.9*& 45.6 & 24.9 & 36.8/29.0/12.8 & 56.3/68.7/76.8/53.0\\
+ Qwen2-VL-7B \cite{qwen2vl} & 67.0& - & 43.8 & 31.5& 40.0/23.4/12.4 & 68.5/72.8/77.3/54.2\\
+ Tarsier-7B \cite{wang2024tarsierrecipestrainingevaluating} & 62.6&53.9&45.8&28.6&29.8/22.2/8.6 & 58.7/58.0/54.2/55.3\\
+ Previous SOTA & \textbf{72.0} \cite{chen2024expanding} & 70.0* \cite{liu2024oryx} & 51.6 \cite{zhang2024internlmxcomposer} & 31.5 \cite{qwen2vl} & 41.6/29.4/14.6 \cite{li2024llava} &
+ 68.5/72.8/77.3/54.2 \cite{qwen2vl}\\
+ \midrule
+ Tarsier2-7B & 71.5& \textbf{71.6}* & \underline{\textbf{54.7}} & \underline{\textbf{42.0}} & \underline{\textbf{65.8}}/\textbf{38.0}/\underline{\textbf{28.8}} & \textbf{75.3}/\textbf{75.1}/\textbf{80.6}/\underline{\textbf{66.6}}\\
+ \bottomrule
+ \end{tabular}%
+ }
+ \caption{Evaluation results on short video question answering benchmarks. * indicates that the training set has been observed in the training data mixture.}
+ \label{tab:my_label}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/4.tex ADDED
@@ -0,0 +1,35 @@
+ \begin{table}[h!]
+ \centering
+ \resizebox{0.98\textwidth}{!}{%
+ \begin{tabular}{l|ccccc}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \textbf{Video-MME}\cite{fu2024video} & \textbf{LongVideoBench}\cite{wu2024longvideobench} & \textbf{TemporalBench}\cite{cai2024temporalbench} & \textbf{MLVU}\cite{zhou2024mlvu} &\textbf{MMBench-Video}\cite{fang2024mmbench}\\
+ & w/o subs & val & Binary Accuracy & M-Avg & val \\
+ \midrule
+ \multicolumn{6}{l}{\textit{Proprietary models}} \\
+ GPT-4o \cite{gpt4o} & 71.9 & \underline{66.7} & \underline{73.2} & 64.6 & 1.87 \\
+ Gemini-1.5-Pro \cite{geminiteam2024gemini15unlockingmultimodal} & \underline{75.0} & 64.0 & 66.4 & - & 1.30 \\
+
+ \midrule
+ \multicolumn{6}{l}{\textit{Open-source models ($>$10B)}} \\
+ VILA-1.5-40B \cite{lin2024vila} & 60.1 & - & - & 56.7 & 1.61 \\
+ LLaVA-Video-72B \cite{zhang2024video} & 70.5 & 61.9 & 72.4 & 74.4 & 1.71 \\
+ Qwen2-VL-72B \cite{qwen2vl} & 71.2 & - & 70.2 & - & 1.70 \\
+ InternVL2.5-78B \cite{chen2024expanding} & 72.1 & 63.6 & - & \underline{75.7} &
+ \underline{1.97} \\
+ Tarsier-34B \cite{wang2024tarsierrecipestrainingevaluating} & 52.3 & 54.2 & 66.7 & 58.2 & 1.46 \\
+ \midrule
+ \multicolumn{6}{l}{\textit{Open-source models ($\leq$10B)}} \\
+ LLaVA-Video-7B \cite{zhang2024video} & 63.3 & 58.2 & 63.6 & 70.8 & 1.60 \\
+ Qwen2-VL-7B \cite{qwen2vl} & 63.3 & 55.6 & 62.0 & - & 1.44 \\
+ InternVL2.5-8B \cite{chen2024expanding} & 64.2 & 60.0 & - & 68.9 & 1.68 \\
+ Tarsier-7B \cite{wang2024tarsierrecipestrainingevaluating} & 42.2 & 39.8 & 56.9 & 49.3 & - \\
+ Previous SOTA & 64.2 \cite{liu2024nvila} & 60.0 \cite{chen2024expanding} & 63.6 \cite{zhang2024video} & 70.9 \cite{zohar2024apolloexplorationvideounderstanding} & 1.70 \cite{yao2024minicpm} \\
+ \midrule
+ \modelname-7B & \textbf{64.5} (128f) & 58.6 (128f) & \textbf{65.3} (128f) & 67.9 (256f) & \textbf{1.82} (128f) \\
+ \bottomrule
+ \end{tabular}%
+ }
+ \caption{Evaluation results on long-video question answering benchmarks. We list the number of frames used for each benchmark when evaluating \modelname.}
+ \label{tab:results-long-video-qa}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/5.tex ADDED
@@ -0,0 +1,37 @@
+ \begin{table}[h!]
+ \centering
+ \resizebox{\textwidth}{!}{%
+ \begin{tabular}{l|c|cc}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \textbf{VideoHallucer} \cite{wang2024videohallucer} & \multicolumn{2}{c}{\textbf{EventHallusion }\cite{zhang2024eventhallusion}} \\
+ \cline{2-4}
+ & Yes/No QA & Yes/No QA & Desc GPT \\
+ & Basic/Hallucinated/\textbf{Overall} & Entire/Interleave/Misleading/\textbf{Overall} & Entire/Interleave/Misleading/\textbf{Overall} \\
+ % \midrule
+ % Human & 90.0 / 88.8 / 85.0 & - & - \\
+ \midrule
+ \multicolumn{4}{l}{\textit{Proprietary models}}\\
+ GPT-4o \cite{gpt4o} & 75.1/74.2/53.3 & 65.8/90.7/92.2/84.1 & 34.9/54.9/83.2/56.2 \\
+ Gemini-1.5-Pro \cite{geminiteam2024gemini15unlockingmultimodal} & 83.6/42.3/37.8 & 70.2/77.7/96.1/80.2 & 38.5/40.9/80.0/49.6 \\
+ \midrule
+ \multicolumn{4}{l}{\textit{Open-Source models ($>$10B)}} \\
+ Qwen2-VL-72B \cite{qwen2vl} & 87.1/79.4/\underline{70.2} & 33.3/77.7/56.4/60.0 & 16.5/25.4/70.2/33.6 \\
+ LLaVA-OV-72B \cite{li2024llavanext} & 88.3/62.6/55.2 & 47.4/26.9/90.1/48.3 & 24.8/34.7/71.3/40.7 \\
+ LLaVA-Video-72B \cite{zhang2024video} & 88.2/73.5/64.6 & 57.9/11.9/96.0/45.6 & 32.1/35.8/75.5/44.2 \\
+ InternVL2.5-78B \cite{chen2024expanding} & 82.5/82.5/67.8 & 57.9/67.9/88.2/70.2 & 45.0/43.0/76.8/51.6 \\
+ Tarsier-34B \cite{wang2024tarsierrecipestrainingevaluating} & 84.8/80.0/67.7 & 49.1/92.7/69.6/74.8 & 38.5/40.4/83.2/50.1 \\
+ \midrule
+ \multicolumn{4}{l}{\textit{Open-Source models ($\leq$10B)}} \\
+ % PLLaVA-7B \cite{xu2024pllava} & 75.1/55.5/38.1 & \\
+ LLaVA-OV-7B \cite{li2024llavanext} & 81.1/69.6/53.8 & 46.5/67.4/86.1/66.2 & 22.0/26.4/73.4/36.4 \\
+ LLaVA-Video-7B \cite{zhang2024video} & 82.4/70.6/56.0 & 61.4/48.7/96.0/64.0 & 27.5/32.6/75.5/41.4 \\
+ Qwen2-VL-7B \cite{qwen2vl} & 85.0/70.8/59.3 & 35.1/94.3/57.4/68.6 & 14.7/16.1/67.0/27.8 \\
+ InternVL2.5-8B \cite{chen2024expanding} &72.7/78.3/53.6&46.5/69.2/90.2/68.2&23.9/20.7/60.0/31.0\\
+ Tarsier-7B \cite{wang2024tarsierrecipestrainingevaluating} & 76.4/60.8/41.4 & 43.9/82.4/79.4/70.9 & 35.8/29.5/72.6/41.6 \\
+ \midrule
+ \modelname-7B & 86.5/78.3/\textbf{67.0}&60.5/93.3/95.1/\underline{\textbf{84.6}}&54.6/53.1/93.7/\underline{\textbf{63.3}} \\
+ \bottomrule
+ \end{tabular}}
+ \caption{Evaluation results on hallucination benchmarks.}
+ \label{tab:video_hallucination}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/6.tex ADDED
@@ -0,0 +1,30 @@
+ \begin{table}[h]
+ \centering
+ \resizebox{0.8\textwidth}{!}{%
+ \begin{tabular}{l|cccccc}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \multicolumn{6}{c}{\textbf{E.T. Bench-Grounding} \cite{liu2024etbench}}\\
+ & TVG$_{F1}$ & EPM$_{F1}$ & TAL$_{F1}$ & EVS$_{F1}$ & VHD$_{F1}$ & \textbf{Mean$_{F1}$}\\
+ \midrule
+ \multicolumn{7}{l}{\textit{Proprietary models}} \\
+ GPT-4V \cite{gpt4v} & \color{lightgray}27.0 & \color{lightgray}1.8 & \color{lightgray}18.0 & \color{lightgray}\underline{28.6} & \color{lightgray}55.1 & \color{lightgray}26.1\\
+ GPT-4o \cite{gpt4o} & \color{lightgray}40.4 & \color{lightgray}4.5 & \color{lightgray}20.0 & \color{lightgray}17.6 & \color{lightgray}56.9 & \color{lightgray}27.9\\
+ Gemini-1.5-Flash \cite{geminiteam2024gemini15unlockingmultimodal} & \color{lightgray}\underline{43.9} & \color{lightgray}5.4 & \color{lightgray}27.0 & \color{lightgray}5.4 & \color{lightgray}60.8 & \color{lightgray}28.5\\
+ Gemini-1.5-Pro \cite{geminiteam2024gemini15unlockingmultimodal} & \color{lightgray}43.1 & \color{lightgray}6.2 & \color{lightgray}33.8 & \color{lightgray}7.9 & \color{lightgray}47.0 & \color{lightgray}27.6\\
+ \midrule
+ \multicolumn{7}{l}{\textit{Open-source models ($<$10B)}} \\
+ LITA \cite{huang2024lita} & 22.2 & 4.6 & 18.0 & 29.7 & 23.9 & 19.7 \\
+ VTG-LLM \cite{guo2024vtg} & 15.9 & 3.7 & 14.4 & 26.8 & 48.2 & 21.8 \\
+ % TimeChat 26.2 & 3.9 & 10.1 & 29.1 & 40.5 & 22.0\\
+ TimeChat \cite{Ren2023TimeChat} $^\dag$ & - & - & - & - & - & 24.3 \\
+ E.T. Chat \cite{liu2024etbench} $^\dag$ & 38.6 & 10.2 & 30.8 & \textbf{25.4} & 62.5 & 33.5 \\
+ Tarsier-7B \cite{wang2024tarsierrecipestrainingevaluating} $^\dag$ & 39.6 & 9.0 & 25.0 & \textbf{25.4} & 47.6 & 30.9 \\
+ Qwen2-VL-7B \cite{qwen2vl} $^\dag$ & \textbf{39.7} & 7.0 & 26.9 & 17.1 & \underline{\textbf{66.9}} & 33.5 \\
+ \midrule
+ Tarsier2-7B $^\dag$ & 38.4 & \underline{\textbf{11.0}} & \underline{\textbf{31.8}} & 19.4 & 66.8 & \underline{\textbf{35.5}} \\
+ \bottomrule
+ \end{tabular}
+ }
+ \caption{Evaluation results on E.T. Bench-Grounding. Results marked in gray are tested on a subset. $\dag$ denotes that the model is fine-tuned on E.T. Instruct 164K.}
+ \label{tab:grounding}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/7.tex ADDED
@@ -0,0 +1,55 @@
+ \begin{table}[h]
+ \centering
+ \setlength{\tabcolsep}{2pt} % adjust column spacing
+ \scriptsize
+ \begin{minipage}{0.27\textwidth}
+ \centering
+ \begin{tabular}{l|c}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \textbf{EgoTaskQA} \\
+ & Exact Match \\
+ \midrule
+ Human & 80.0 \\
+ HCRN \cite{le2020hierarchical} & 42.2 \\
+ GF \cite{bai2024glance} & 44.3 \\
+ EgoVLPv2 \cite{pramanick2023egovlpv2} & 46.3 \\
+ \midrule
+ \modelname & \underline{\textbf{77.5}} \\
+ \bottomrule
+ \end{tabular}
+ \end{minipage}
+ \begin{minipage}{0.415\textwidth}
+ \centering
+ \begin{tabular}{l|c}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \textbf{RoboVQA} \\
+ & BLEU-1/2/3/4\\
+ \midrule
+ LLaMA-AdapterV2 \cite{gao2023llama} & 27.8/16.0/10.9/8.1 \\
+ LLaVA-OV-7B \cite{li2024llavanext} & 38.1/33.6/31.8/31.0 \\
+ RoboMamba \cite{liu2024robomamba} & 54.9/44.2/39.5/36.3 \\
+ MLCD \cite{an2025multi} & 73.2/66.4/60.6/56.6 \\
+ \midrule
+ \modelname & \underline{\textbf{77.1}}/\underline{\textbf{67.4}}/\underline{\textbf{61.5}}/\underline{\textbf{56.8}} \\
+ \bottomrule
+ \end{tabular}
+ \end{minipage}
+ \begin{minipage}{0.3\textwidth}
+ \centering
+ \begin{tabular}{l|c}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \textbf{OpenEQA} \\
+ & GPT-4\\
+ \midrule
+ Human & 86.8 \\
+ GPT-4V \cite{gpt4v} & 55.3 \\
+ Gemini-1.5-Pro \cite{geminiteam2024gemini15unlockingmultimodal} & 44.9 \\
+ MLCD \cite{an2025multi} & 48.8 \\
+ \midrule
+ \modelname & \underline{\textbf{58.7}} \\
+ \bottomrule
+ \end{tabular}
+ \end{minipage}
+ \caption{Evaluation results on embodied question-answering tasks, including EgoTaskQA, RoboVQA and OpenEQA.}
+ \label{table:evaluate-EgoTaskQA}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/8.tex ADDED
@@ -0,0 +1,21 @@
+ \begin{table}[h]
+ \centering
+ \scriptsize
+ \setlength{\tabcolsep}{3pt} % adjust column spacing
+ \resizebox{\textwidth}{!}{%
+ \begin{tabular}{l|ccc|cc|c}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \multicolumn{3}{c|}{\textbf{Caption}} & \multicolumn{2}{c|}{\textbf{Video QA}} & \multirow{2}{*}{\textbf{Hallucination}} \\
+ & DREAM-1K & TempCompass-cg & Vinoground-Text & Short & Long & \\
+ \midrule
+ Tarsier1-7B & 34.6 & 55.3 & 29.8 & 45.6 & 46.3 & 56.3 \\
+ \midrule
+ \makecell[l]{Tarsier1-7B-Qwen\\ \quad\textit{upgrading model}} & 38.4 ($\uparrow$3.8) & 59.3 ($\uparrow$4.0) & 48.6 ($\uparrow$18.8) & 52.4 ($\uparrow$6.8) & 57.6 ($\uparrow$11.3) & 62.1 ($\uparrow$5.8) \\
+ \midrule
+ \makecell[l]{Tarsier2-7B\\ \quad\textit{upgrading model}+\textit{data}} & 40.8 ($\uparrow$6.2) & 60.1 ($\uparrow$4.8) & 60.2 ($\uparrow$30.4) & 55.3 ($\uparrow$9.7) & 64.1 ($\uparrow$17.8) & 63.5 ($\uparrow$7.2)\\
+ \bottomrule
+ \end{tabular}
+ }
+ \caption{Results of the ablation study for pre-training. Tarsier1-7B-Qwen denotes the model whose base model is upgraded to Qwen2-VL while the pre-training dataset remains the same as Tarsier1's. Tarsier2 is trained from Qwen2-VL with the pre-training dataset expanded from 13 million samples in Tarsier1 to 40 million.}
+ \label{tab:pretrain_ab_vlm_and_data}
+ \end{table}
workspace/01_Productivity_Flow/task_2_table_tex_download/gt/9.tex ADDED
@@ -0,0 +1,17 @@
+ \begin{table}[t]
+ \centering
+ \setlength{\tabcolsep}{2pt} % adjust column spacing
+ \resizebox{\textwidth}{!}{\begin{tabular}{l|ccc|cc|c}
+ \toprule
+ \multirow{2}{*}{\textbf{Model}} & \multicolumn{3}{c|}{\textbf{Caption}} & \multicolumn{2}{c|}{\textbf{Video QA}} & \multirow{2}{*}{\textbf{Hallucination}} \\
+ & DREAM-1K & TempCompass-cg & Vinoground-Text & Short & Long & \\
+ \midrule
+ \modelname-7B-SFT & 40.8 & 60.1 & 60.2 & 56.2 & 63.2 & 71.9 \\
+ \midrule
+ \quad \textit{w/o SFT} & 35.2 ($\downarrow$5.6) & 50.5 ($\downarrow$9.6) & 57.2 ($\downarrow$3.0) & 55.3 ($\downarrow$0.9) & 64.1 ($\uparrow$0.9) & 63.5 ($\downarrow$8.4) \\
+ \quad \textit{w/o grounding} & 37.4 ($\downarrow$3.4) & 50.2 ($\downarrow$9.9) & 60.6 ($\uparrow$0.4) & 55.9 ($\downarrow$0.3) & 61.9 ($\downarrow$1.3) & 68.6 ($\downarrow$3.3) \\
+ \bottomrule
+ \end{tabular}}
+ \caption{Ablation study of the temporal grounding dataset during the SFT phase. \modelname-7B-SFT refers to the model after the SFT phase. \textit{w/o SFT} refers to the model after pre-training; \textit{w/o grounding} refers to the model fine-tuned without grounding information.}
+ \label{tab:pretrain_ab_grounding}
+ \end{table}
workspace/01_Productivity_Flow/task_3_bibtex/.DS_Store ADDED
Binary file (6.15 kB)
workspace/01_Productivity_Flow/task_3_bibtex/exec/2489e1b1a4830c47c93322340d8a9f61.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2628f5fa215d4a5f509d743bce90cd90df6ca148e458bea22238ee30a940b33
+ size 1818194
workspace/01_Productivity_Flow/task_3_bibtex/exec/2959f681e57b94946d8d83e63108743b.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3058b36775fbd7bd52182365c0c21f2ff2b7f183d1dd059b48921516558a1ef
+ size 1093777
workspace/01_Productivity_Flow/task_3_bibtex/exec/4de47fd19b562f6d80b38ca25c100e34.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89ad711d0759cb649810e98f5c576d0fab97fe121c0b2402ef6f7d517be0a9de
+ size 747855
workspace/01_Productivity_Flow/task_3_bibtex/exec/4fb4a8c10c244047b34b98f0802ef736.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c98c6b601a0484662b4a2260982cdf15be842e2f467942111d4a291582ea359
+ size 252451
workspace/01_Productivity_Flow/task_3_bibtex/exec/695b9b59dee6083f338f50f697bbc0a8.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e37500eaf2a306bb843a6732a9718338f99c326b508f561330cd911497f7910
+ size 31848492
workspace/01_Productivity_Flow/task_3_bibtex/exec/6d7048c05f54c7810f325586cb691275.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4eca60ae9d3c1548242da02ece0e2f35ce23d3a983255830d98212aa92e3167
+ size 6329953
workspace/01_Productivity_Flow/task_3_bibtex/exec/757d0bb0887db877663297fbb1ac0f93.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa25bbf27fedcae1b413dddfe020af31b2cb63ada049489893661f25a8e46d0a
+ size 6946266
workspace/01_Productivity_Flow/task_3_bibtex/exec/89c977567a6162eb19c09946d25d4e7f.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:191c3283f7f629df42adcf0569aad1ec4adf54d9e2f5d47214aff44259fe4a2e
+ size 1524734
workspace/01_Productivity_Flow/task_3_bibtex/exec/a1545d8c44f7879527b4bfdc2d550962.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58e79fd6452531b9a388d8c7535866d61f9a2260d4b70b776ca7a95594a78d02
+ size 28337581
workspace/01_Productivity_Flow/task_3_bibtex/exec/a9c8098ce76332faaa6e24a10098bd88.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3aabe3df8373c5e36754bd0536e9983b3c6643cd225025fd0e077a2fb3af3702
+ size 6502878
workspace/01_Productivity_Flow/task_3_bibtex/exec/b059bfe6b011cb483c719fd293f13f7b.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b30c981018bb592e3e06c76b32ba2e7c520f3a9f96e876e05102c0aaa2f893ce
+ size 11828548
workspace/01_Productivity_Flow/task_3_bibtex/exec/b8b730d1313f51fef24a5a81e46e292f.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:345e4e52c460951c0f2a301c5b020665e51a32db2b6cd711a63a1ebf62f77b19
+ size 12853939
workspace/01_Productivity_Flow/task_3_bibtex/exec/ce85fc04493b4ec6ab8e4d174ddbe8e1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b6dcf9cfb435f678ca028daf9f6aab1e6bd64000ebf5309776d790655086a3e
+ size 320228
workspace/01_Productivity_Flow/task_3_bibtex/exec/d197b59e06827356f606ff41a479f4ee.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c66955ad9c73de23c05255f7b3ee0723a96140086e859aac3480b3450c1307b7
+ size 10398125
workspace/01_Productivity_Flow/task_3_bibtex/exec/d2bcd6b1d8428116ffe3df31e783e72a.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6cc20b3c5b8d25b8b53868fc4ec1792c144f07d67bdf7395138efd4422197e7b
+ size 1844642
workspace/01_Productivity_Flow/task_3_bibtex/exec/e4b6db4b71fd970057e5b48a2e4e26ea.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdd2751ea3f3c4b868fc6268bb1424d5a160f9b7fa6c48cba51af372ec8b2732
+ size 1197690
workspace/01_Productivity_Flow/task_3_bibtex/exec/e522f06ccdb0216fef37f1b591d9dc1f.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31dddf0cee705b926a773f69650cc6de5044398fca299cca919fe7dc0c76ad43
+ size 187754
workspace/01_Productivity_Flow/task_3_bibtex/exec/e5f870f54750f54b87de5634c9d5e075.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1984bb50a5b90fddb895fdc3a0f72e5bc977148c9f63ef6040cbe7a3e1f0d98
+ size 1797405
workspace/01_Productivity_Flow/task_3_bibtex/exec/e9b81fe821dc398c0644e1dda2fab714.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5b9fd2aef0f45e722d7fc80aec389b31e8b5ec7e0fa1c4899ed635405838c5f
+ size 3391658
workspace/01_Productivity_Flow/task_3_bibtex/exec/f62a2ebbbe16dc365c992371aff60ea2.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4dd2fe06adb949db64404f4007bd5c82243be1e8b14de573e8e3d7a267055300
+ size 25371842
workspace/01_Productivity_Flow/task_3_bibtex/exec/fe790c3e46d18e0c3d6ed08a26d1e322.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cb58cc5ad3735dae1c3e49e988a2dd6b01e608eb0e43d20b0b8266efd11e94c
+ size 890663
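Note: each of the exec/*.pdf entries above is a three-line Git LFS pointer file, not the PDF itself; the actual bytes live in LFS storage and are addressed by the sha256 `oid` and byte `size` recorded in the pointer. Below is a minimal Python sketch of how such a pointer could be parsed and checked against a locally downloaded blob; the function names and file paths are ours, not part of the repo, and only the three spec fields shown above are assumed.

import hashlib
import pathlib

def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in pathlib.Path(path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value
    # Per the LFS spec, the pointer must declare this version URL.
    assert fields["version"] == "https://git-lfs.github.com/spec/v1"
    return fields

def matches_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check that a downloaded blob matches the oid and size in its pointer."""
    fields = parse_lfs_pointer(pointer_path)
    algo, _, digest = fields["oid"].partition(":")
    assert algo == "sha256"
    data = pathlib.Path(blob_path).read_bytes()
    return (hashlib.sha256(data).hexdigest() == digest
            and len(data) == int(fields["size"]))

# Hypothetical usage against the first pointer above:
# matches_pointer("exec/2489e1b1a4830c47c93322340d8a9f61.pdf", "downloads/2489e1b1.pdf")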
workspace/01_Productivity_Flow/task_3_bibtex/gt/2203.02155.bib ADDED
@@ -0,0 +1,9 @@
+ @misc{ouyang2022traininglanguagemodelsfollow,
+ title={Training language models to follow instructions with human feedback},
+ author={Long Ouyang and Jeff Wu and Xu Jiang and Diogo Almeida and Carroll L. Wainwright and Pamela Mishkin and Chong Zhang and Sandhini Agarwal and Katarina Slama and Alex Ray and John Schulman and Jacob Hilton and Fraser Kelton and Luke Miller and Maddie Simens and Amanda Askell and Peter Welinder and Paul Christiano and Jan Leike and Ryan Lowe},
+ year={2022},
+ eprint={2203.02155},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL},
+ url={https://arxiv.org/abs/2203.02155},
+ }
workspace/01_Productivity_Flow/task_3_bibtex/gt/2301.12597.bib ADDED
@@ -0,0 +1,9 @@
+ @misc{li2023blip2bootstrappinglanguageimagepretraining,
+ title={BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models},
+ author={Junnan Li and Dongxu Li and Silvio Savarese and Steven Hoi},
+ year={2023},
+ eprint={2301.12597},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV},
+ url={https://arxiv.org/abs/2301.12597},
+ }
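Note: the gt/*.bib files are named after the arXiv identifier of the paper they cite, and each entry carries the same identifier in its `eprint` field. A small stdlib-only sketch of that consistency check follows; the helper name is hypothetical, not part of the task files.

import pathlib
import re

def check_bib_matches_filename(bib_path: str) -> bool:
    """Check that the eprint field of a gt .bib file matches its arXiv-ID filename."""
    text = pathlib.Path(bib_path).read_text()
    match = re.search(r"eprint=\{([^}]+)\}", text)
    return match is not None and match.group(1) == pathlib.Path(bib_path).stem

# e.g. check_bib_matches_filename("gt/2203.02155.bib") should return True,
# since that entry carries eprint={2203.02155}.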