Chelsea707 committed
Commit cf3ec9f · verified · 1 Parent(s): 1f0781b

MinerU Batch 8f46ae6c-07be-4c29-a893-b2f0e7aaabad (Part 3/8)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +8 -0
  2. data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_content_list.json +737 -0
  3. data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_model.json +1013 -0
  4. data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_origin.pdf +3 -0
  5. data/2025/2504_05xxx/2504.05758/full.md +173 -0
  6. data/2025/2504_05xxx/2504.05758/images/1e94108d7e948cbe2234be390b158b637b2839b2600cdc7b10c6f6f9d8c86757.jpg +3 -0
  7. data/2025/2504_05xxx/2504.05758/images/3c81cf688d23b9a5c258f1a910d4b6375efaa9b4bf011b786cf38f50312871eb.jpg +3 -0
  8. data/2025/2504_05xxx/2504.05758/images/45c9c4e23af1f7fcd71e4e1f1c42005c9812a59abe82b5be235e2ec375883ea3.jpg +3 -0
  9. data/2025/2504_05xxx/2504.05758/images/4bf722e0285a33bb56a86df84ca351fde95bf2e681d8d0f451530ce70b8ce5ea.jpg +3 -0
  10. data/2025/2504_05xxx/2504.05758/images/57a239a94222df9728fff416dc0f582b2ccc3b70ec254c8b69c33076069bada5.jpg +3 -0
  11. data/2025/2504_05xxx/2504.05758/images/6ad69f33204c10e92da4741df3cc69002914239055daa6956253b03c994af45e.jpg +3 -0
  12. data/2025/2504_05xxx/2504.05758/images/93059c0b0200dfbf1341842c23946121be5bcecbebeabde6e3839117f7341335.jpg +3 -0
  13. data/2025/2504_05xxx/2504.05758/images/acdfe8a5e054f9435eea5074a1a9c3feb17a8c5ca109e1f7b62d7971197424bb.jpg +3 -0
  14. data/2025/2504_05xxx/2504.05758/images/bb202b08b189fda03ed9f8a34755cc4f9f244fe15241e298af9a4ebaca3f9733.jpg +3 -0
  15. data/2025/2504_05xxx/2504.05758/images/c8945917346c994832f059f7db5489e15026b3e9e6b1a93768160bb4b7790869.jpg +3 -0
  16. data/2025/2504_05xxx/2504.05758/images/e52f8ad9e8d735a3ab74ab8773298cc5f69f2a2f31b5ee1d75b171e7cdd13362.jpg +3 -0
  17. data/2025/2504_05xxx/2504.05758/images/e739f93a7fbde9cf2a99b06d3897132dc8dbed0714b55865b79655a88af9e655.jpg +3 -0
  18. data/2025/2504_05xxx/2504.05758/images/ffd677e4e9724b33fd97b088d2b56b347e397c7cd6d17964076bb5ea1b746ab4.jpg +3 -0
  19. data/2025/2504_05xxx/2504.05758/layout.json +0 -0
  20. data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_content_list.json +1200 -0
  21. data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_model.json +1681 -0
  22. data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_origin.pdf +3 -0
  23. data/2025/2504_05xxx/2504.05786/full.md +247 -0
  24. data/2025/2504_05xxx/2504.05786/images/01a7846f1deba904180c76b95b69058c7981b82fbb85fd782f363da73b4c3476.jpg +3 -0
  25. data/2025/2504_05xxx/2504.05786/images/10f4e27138d77cef1e66632497ab60fcb460eb82533892d1e5d74ab2bb75012d.jpg +3 -0
  26. data/2025/2504_05xxx/2504.05786/images/1bf7f57265e65d413e42b9341fc93ceead1023f71cc6b80ce679fd74fcaec139.jpg +3 -0
  27. data/2025/2504_05xxx/2504.05786/images/1c4b8c1a8e39901fceb895fcf642206dfa4b227423055dcf9d75196664ca28d0.jpg +3 -0
  28. data/2025/2504_05xxx/2504.05786/images/6f283c8ccff38019b629c7d3baf89d8f01eae9a6757bd3db846940b8dbae1d64.jpg +3 -0
  29. data/2025/2504_05xxx/2504.05786/images/d940e65bdb4924227a21274aee6d75d1ec4fbdde1933224aff9d92e6f49a75fb.jpg +3 -0
  30. data/2025/2504_05xxx/2504.05786/images/e0e4442a4b46313dc44cf0f5c9bff8b0f952923527a0c8287f8b90fc3e5eaf75.jpg +3 -0
  31. data/2025/2504_05xxx/2504.05786/images/f869e9459876e49023165cdcf85439fb6449fd17975fe58ef81ce28fb4e6e702.jpg +3 -0
  32. data/2025/2504_05xxx/2504.05786/layout.json +0 -0
  33. data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_content_list.json +1238 -0
  34. data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_model.json +1442 -0
  35. data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_origin.pdf +3 -0
  36. data/2025/2504_05xxx/2504.05792/full.md +275 -0
  37. data/2025/2504_05xxx/2504.05792/images/01ea50d42286274380a45111164366e7d2a8affde9e260718f721bf2d384510e.jpg +3 -0
  38. data/2025/2504_05xxx/2504.05792/images/0b193879da9018fe9108b7b97eb52ae05825c2c4ca59ee37a9665be50f9dd931.jpg +3 -0
  39. data/2025/2504_05xxx/2504.05792/images/14299fc635244dedb6a0531d4d4b05a4d9af566abd191463f2ee7da4fce95e15.jpg +3 -0
  40. data/2025/2504_05xxx/2504.05792/images/1471639b0f119a70aea449f2c23ef35cea4d5de252869d12349b3c54fafbf1c3.jpg +3 -0
  41. data/2025/2504_05xxx/2504.05792/images/151d1b9a99db6dd96408c9f3ba705c7a8bdad63c519ff3859798eac266f15097.jpg +3 -0
  42. data/2025/2504_05xxx/2504.05792/images/1dccd3cab821d5c1eaf6b5880ef6b79971c23a5b46324e9c72aad3ff00c1e95a.jpg +3 -0
  43. data/2025/2504_05xxx/2504.05792/images/214063bb72dbd9a11d4eb4e79e473fdc96c3ca35a008baf61c81991492ed251a.jpg +3 -0
  44. data/2025/2504_05xxx/2504.05792/images/247c87c8019459213bfe0ce6435a498ac3d201e36b5445abfef7c87b74001d6d.jpg +3 -0
  45. data/2025/2504_05xxx/2504.05792/images/37093a2ca762a3380380c7f8cf3e0bb02c3ef35903771b9c7dd9cbf571d85434.jpg +3 -0
  46. data/2025/2504_05xxx/2504.05792/images/44402e915e3a77623794fffb1854bee176c10134d19af786ec61562a51c00f68.jpg +3 -0
  47. data/2025/2504_05xxx/2504.05792/images/486c8e561be8deccf96b5bf141f51114caaddd2b6a1e8ef24fb805f7283bf4b4.jpg +3 -0
  48. data/2025/2504_05xxx/2504.05792/images/547d79cbc78526a73c650c0b1ea306da82b683b588de1e662784af6b3e9448c8.jpg +3 -0
  49. data/2025/2504_05xxx/2504.05792/images/5959a4483a3d708e1fda07cb15e0ba8ae7a2653fca446613aace2299d7205a6c.jpg +3 -0
  50. data/2025/2504_05xxx/2504.05792/images/5a1798459fbd0aa32c927f7230e28a18dc94758ab47ca58020471c74159cf48b.jpg +3 -0
.gitattributes CHANGED
@@ -1284,3 +1284,11 @@ data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_origin.pdf
1284
  data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_origin.pdf filter=lfs diff=lfs merge=lfs -text
1285
  data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_origin.pdf filter=lfs diff=lfs merge=lfs -text
1286
  data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_origin.pdf filter=lfs diff=lfs merge=lfs -text
1287
+ data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_origin.pdf filter=lfs diff=lfs merge=lfs -text
1288
+ data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_origin.pdf filter=lfs diff=lfs merge=lfs -text
1289
+ data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_origin.pdf filter=lfs diff=lfs merge=lfs -text
1290
+ data/2025/2504_05xxx/2504.05812/2ebd62c4-e647-47e7-bb58-1c94267578a3_origin.pdf filter=lfs diff=lfs merge=lfs -text
1291
+ data/2025/2504_05xxx/2504.05862/f5f20bac-767d-4260-82e6-943416d1d631_origin.pdf filter=lfs diff=lfs merge=lfs -text
1292
+ data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_origin.pdf filter=lfs diff=lfs merge=lfs -text
1293
+ data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_origin.pdf filter=lfs diff=lfs merge=lfs -text
1294
+ data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_origin.pdf filter=lfs diff=lfs merge=lfs -text
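The eight `filter=lfs` patterns added above route the `*_origin.pdf` files through Git LFS, so a checkout made without LFS installed will contain small pointer stubs rather than the PDFs. A minimal sanity check, assuming a local clone (the path below is illustrative, taken from this commit's entries):

```python
# Detect whether a file is a Git LFS pointer stub rather than real content.
# LFS pointers are small text files whose first line names the LFS spec.
def is_lfs_pointer(path: str) -> bool:
    with open(path, "rb") as f:
        head = f.read(64)
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")

print(is_lfs_pointer(
    "data/2025/2504_05xxx/2504.05758/"
    "607ad388-a3c2-41a7-ba56-f696cce741af_origin.pdf"
))
```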
data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_content_list.json ADDED
@@ -0,0 +1,737 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Addressing Class Imbalance with Probabilistic Graphical Models and Variational Inference",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 130,
8
+ 69,
9
+ 867,
10
+ 137
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Yujia Lou \nUniversity of Rochester \nRochester, USA",
17
+ "bbox": [
18
+ 130,
19
+ 143,
20
+ 277,
21
+ 190
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Jie Liu \nUniversity of Minnesota \nMinneapolis, USA",
28
+ "bbox": [
29
+ 424,
30
+ 143,
31
+ 573,
32
+ 190
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Yuan Sheng Northeastern University Seattle, USA",
39
+ "bbox": [
40
+ 722,
41
+ 143,
42
+ 867,
43
+ 184
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Jiawei Wang \nUniversity of California Los Angeles, USA",
50
+ "bbox": [
51
+ 130,
52
+ 220,
53
+ 274,
54
+ 263
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "Yiwei Zhang Cornell University Ithaca, USA",
61
+ "bbox": [
62
+ 441,
63
+ 220,
64
+ 555,
65
+ 262
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "Yaokun Ren* Northeastern University Seattle, USA",
72
+ "bbox": [
73
+ 722,
74
+ 220,
75
+ 867,
76
+ 263
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "Abstract-This study proposes a method for imbalanced data classification based on deep probabilistic graphical models (DPGMs) to solve the problem that traditional methods have insufficient learning ability for minority class samples. To address the classification bias caused by class imbalance, we introduce variational inference optimization probability modeling, which enables the model to adaptively adjust the representation ability of minority classes and combines the class-aware weight adjustment strategy to enhance the classifier's sensitivity to minority classes. In addition, we combine the adversarial learning mechanism to generate minority class samples in the latent space so that the model can better characterize the category boundary in the high-dimensional feature space. The experiment is evaluated on the Kaggle \"Credit Card Fraud Detection\" dataset and compared with a variety of advanced imbalanced classification methods (such as GAN-based sampling, BRF, XGBoost-Cost Sensitive, SAAD, HAN). The results show that the method in this study has achieved the best performance in AUC, Precision, Recall and F1-score indicators, effectively improving the recognition rate of minority classes and reducing the false alarm rate. This method can be widely used in imbalanced classification tasks such as financial fraud detection, medical diagnosis, and anomaly detection, providing a new solution for related research.",
83
+ "bbox": [
84
+ 66,
85
+ 311,
86
+ 488,
87
+ 612
88
+ ],
89
+ "page_idx": 0
90
+ },
91
+ {
92
+ "type": "text",
93
+ "text": "Keywords-Deep probabilistic graphical models; Imbalanced data classification; Variational inference; Adversarial learning",
94
+ "bbox": [
95
+ 66,
96
+ 625,
97
+ 478,
98
+ 652
99
+ ],
100
+ "page_idx": 0
101
+ },
102
+ {
103
+ "type": "text",
104
+ "text": "I. INTRODUCTION",
105
+ "text_level": 1,
106
+ "bbox": [
107
+ 210,
108
+ 661,
109
+ 356,
110
+ 674
111
+ ],
112
+ "page_idx": 0
113
+ },
114
+ {
115
+ "type": "text",
116
+ "text": "In modern data analysis and machine learning research, the issue of data imbalance has emerged as a critical challenge affecting classifier performance. Many real-world classification tasks exhibit highly skewed class distributions, such as rare disease detection in market risk analysis and prediction [1], financial fraud detection [2-4], and cybersecurity anomaly [5] detection. In these scenarios, the number of minority class samples is significantly lower than that of the majority class. Traditional machine learning models often prioritize optimizing overall classification accuracy, leading to a substantially reduced recognition rate for minority class instances. Existing approaches primarily include data-level resampling strategies, algorithm-level cost-sensitive learning, and adaptive training strategies in deep learning. However, these methods still face several challenges in practical applications, such as potential noise introduction in resampling, the need for precise loss",
117
+ "bbox": [
118
+ 66,
119
+ 680,
120
+ 486,
121
+ 902
122
+ ],
123
+ "page_idx": 0
124
+ },
125
+ {
126
+ "type": "text",
127
+ "text": "weight tuning in cost-sensitive learning, and the limited generalization ability of deep learning models on imbalanced data [6]. Consequently, designing more robust and generalizable classification methods to enhance the performance of imbalanced data classification remains a core research problem.",
128
+ "bbox": [
129
+ 506,
130
+ 311,
131
+ 928,
132
+ 395
133
+ ],
134
+ "page_idx": 0
135
+ },
136
+ {
137
+ "type": "text",
138
+ "text": "In recent years, deep probabilistic graphical models (DPGMs) have gained significant attention in machine learning and pattern recognition due to their powerful representation capabilities and uncertainty modeling. Probabilistic graphical models integrate probabilistic statistical theory with graph-based methods, effectively capturing complex dependencies among variables and modeling data uncertainty [7]. Compared to conventional deep neural networks, probabilistic graphical models offer notable advantages in small-sample learning, data sparsity, and uncertainty reasoning. The emergence of Bayesian deep learning, variational inference, and graph neural networks has further strengthened the applicability of DPGMs in addressing data imbalance challenges [8]. By incorporating probabilistic priors and posterior distributions, these models can more accurately characterize minority class data while effectively accounting for uncertainty in decision-making [9], thereby enhancing the classifier's sensitivity to minority class instances. Given this, exploring how to leverage the strengths of deep probabilistic graphical models to develop a more robust imbalanced data classification framework holds both theoretical significance and practical potential.",
139
+ "bbox": [
140
+ 506,
141
+ 401,
142
+ 928,
143
+ 693
144
+ ],
145
+ "page_idx": 0
146
+ },
147
+ {
148
+ "type": "text",
149
+ "text": "The purpose of this study is to investigate the effectiveness of deep probabilistic graphical models (DPGMs) in imbalanced data classification, introducing a novel strategy that emphasizes improved performance on minority class samples. Unlike heuristic methods, this approach leverages the generative capabilities of DPGMs through adaptive probabilistic modeling and structural learning, capturing richer representations of underrepresented samples. Variational inference and Bayesian optimization further refine model parameters, enhancing classification robustness while expanding theoretical insights into DPGMs. Beyond its methodological contributions, the proposed model has notable practical value. In human-computer interaction and the financial sector, user intent prediction methods [10] and time-series risk prediction strategies [11] further underscore how DPGMs can adapt to",
150
+ "bbox": [
151
+ 506,
152
+ 699,
153
+ 929,
154
+ 907
155
+ ],
156
+ "page_idx": 0
157
+ },
158
+ {
159
+ "type": "text",
160
+ "text": "diverse data structures and real-time processing requirements. Moreover, the efficient market signal detection approach proposed by Zhou et al. [12] highlights the role of advanced neural architectures in combination with DPGMs for continuous data streams. These complementary research directions illustrate the wide-ranging applicability and potential extensions of deep probabilistic models in various industries and research fields. By integrating probabilistic inference and deep learning, this work not only addresses the challenges of imbalanced classification but also broadens the application of deep probabilistic approaches, ultimately enriching the machine learning toolkit for various critical domains.",
161
+ "bbox": [
162
+ 66,
163
+ 66,
164
+ 486,
165
+ 233
166
+ ],
167
+ "page_idx": 1
168
+ },
169
+ {
170
+ "type": "text",
171
+ "text": "II. METHOD",
172
+ "text_level": 1,
173
+ "bbox": [
174
+ 230,
175
+ 243,
176
+ 333,
177
+ 256
178
+ ],
179
+ "page_idx": 1
180
+ },
181
+ {
182
+ "type": "text",
183
+ "text": "Suppose dataset $D = \\{(x_{i},y_{i})\\}_{i = 1}^{N}$ , where $x_{i}\\in R^{d}$ represents input samples and $y_{i}\\in \\{0,1\\}$ represents category labels. Assuming that the ratio of positive and negative samples is seriously unbalanced, that is, $|\\{yi = 1\\} | < < |\\{y_i = 0\\} |$ , traditional deep learning methods tend to favor the majority class when optimizing the loss function. Therefore, we introduce deep probabilistic graphical models (DPGMs). By constructing a joint probability distribution and incorporating variational inference techniques inspired by Wang [13], the proposed model adaptively enhances the representation capability for minority class samples, leading to improved classification performance. This approach effectively mitigates the challenges posed by class imbalance, thereby ensuring that minority class samples are accurately and adequately represented within the learned feature space. Additionally, leveraging dynamic distributed scheduling methodologies as discussed by Sun [14] enables efficient handling of data streams, optimizing both task delays and load balancing. Such strategies significantly contribute to maintaining computational efficiency and enhancing the real-time responsiveness of the system. Furthermore, the synergistic integration of deep learning methods and neural architecture search techniques outlined by Yan et al. [15] further refines the adaptive representational adjustments, ensuring the robustness, accuracy, and generalization capabilities of the proposed classification framework. The architecture of the probabilistic graphical model is shown in Figure 1.",
184
+ "bbox": [
185
+ 66,
186
+ 263,
187
+ 486,
188
+ 662
189
+ ],
190
+ "page_idx": 1
191
+ },
192
+ {
193
+ "type": "text",
194
+ "text": "First, we define a hidden variable $z$ to model the potential representation of the input data $x$ , and use the Bayesian generation model to describe the data generation process:",
195
+ "bbox": [
196
+ 66,
197
+ 667,
198
+ 486,
199
+ 712
200
+ ],
201
+ "page_idx": 1
202
+ },
203
+ {
204
+ "type": "equation",
205
+ "text": "\n$$\np (x, y, z) = p (y \\mid z) p (z \\mid x) p (x)\n$$\n",
206
+ "text_format": "latex",
207
+ "bbox": [
208
+ 163,
209
+ 719,
210
+ 413,
211
+ 734
212
+ ],
213
+ "page_idx": 1
214
+ },
215
+ {
216
+ "type": "text",
217
+ "text": "Among them, $p(y \\mid z)$ represents the posterior distribution of the classifier for the latent variable, and $p(z \\mid x)$ represents the prior distribution of the latent variable. Based on this, our goal is to optimize the model parameters by maximizing the marginal log-likelihood:",
218
+ "bbox": [
219
+ 66,
220
+ 744,
221
+ 486,
222
+ 823
223
+ ],
224
+ "page_idx": 1
225
+ },
226
+ {
227
+ "type": "equation",
228
+ "text": "\n$$\n\\log (y \\mid x) = \\log \\int p (y \\mid z) p (z \\mid x) d z\n$$\n",
229
+ "text_format": "latex",
230
+ "bbox": [
231
+ 151,
232
+ 829,
233
+ 423,
234
+ 854
235
+ ],
236
+ "page_idx": 1
237
+ },
238
+ {
239
+ "type": "image",
240
+ "img_path": "images/c8945917346c994832f059f7db5489e15026b3e9e6b1a93768160bb4b7790869.jpg",
241
+ "image_caption": [
242
+ "Figure 1. The architecture of the probabilistic graphical model"
243
+ ],
244
+ "image_footnote": [],
245
+ "bbox": [
246
+ 545,
247
+ 73,
248
+ 893,
249
+ 284
250
+ ],
251
+ "page_idx": 1
252
+ },
253
+ {
254
+ "type": "text",
255
+ "text": "However, this integral is difficult to compute directly, so we use variational inference to approximate the solution. Define a variational distribution $q(z|x)$ to approximate $p(z|x)$ , and optimize the model through the evidence lower bound (ELBO):",
256
+ "bbox": [
257
+ 508,
258
+ 338,
259
+ 928,
260
+ 416
261
+ ],
262
+ "page_idx": 1
263
+ },
264
+ {
265
+ "type": "equation",
266
+ "text": "\n$$\n\\log p (y \\mid x) \\geq E _ {q (z \\mid x)} [ \\log p (y \\mid z) ] - D _ {K L} (q (z \\mid x) \\| p (z))\n$$\n",
267
+ "text_format": "latex",
268
+ "bbox": [
269
+ 509,
270
+ 426,
271
+ 931,
272
+ 446
273
+ ],
274
+ "page_idx": 1
275
+ },
276
+ {
277
+ "type": "text",
278
+ "text": "Among them, $D_{KL}(\\cdot \\| \\cdot)$ represents the Kullback-Leibler divergence, which is used to measure the gap between the approximate distribution and the true posterior distribution. In order to further optimize the classification performance of minority classes, we introduce category-aware variational inference and explicitly enhance the weight of minority class samples in the loss function:",
279
+ "bbox": [
280
+ 508,
281
+ 450,
282
+ 928,
283
+ 551
284
+ ],
285
+ "page_idx": 1
286
+ },
287
+ {
288
+ "type": "equation",
289
+ "text": "\n$$\nL = \\sum_ {i = 1} ^ {N} w (y _ {i}) \\left[ E _ {q (z | x _ {i})} [ \\log p (y _ {i} \\mid z) ] - \\right.\n$$\n",
290
+ "text_format": "latex",
291
+ "bbox": [
292
+ 580,
293
+ 561,
294
+ 854,
295
+ 606
296
+ ],
297
+ "page_idx": 1
298
+ },
299
+ {
300
+ "type": "text",
301
+ "text": "Among them, $w(y_{i})$ is the category weight coefficient, and a higher weight is set for minority class samples, for example:",
302
+ "bbox": [
303
+ 508,
304
+ 618,
305
+ 926,
306
+ 652
307
+ ],
308
+ "page_idx": 1
309
+ },
310
+ {
311
+ "type": "equation",
312
+ "text": "\n$$\nw \\left(y _ {i}\\right) = \\frac {N _ {\\text {m a j o r}}}{N _ {\\text {m i n o r}}}\n$$\n",
313
+ "text_format": "latex",
314
+ "bbox": [
315
+ 658,
316
+ 660,
317
+ 779,
318
+ 700
319
+ ],
320
+ "page_idx": 1
321
+ },
322
+ {
323
+ "type": "text",
324
+ "text": "Where A and B represent the number of samples in the majority class and the minority class, respectively.",
325
+ "bbox": [
326
+ 508,
327
+ 709,
328
+ 926,
329
+ 739
330
+ ],
331
+ "page_idx": 1
332
+ },
333
+ {
334
+ "type": "text",
335
+ "text": "In the specific implementation, we use variational autoencoder (VAE) as the probability generation model, so that $p(z \\mid x)$ obeys the normal distribution:",
336
+ "bbox": [
337
+ 508,
338
+ 744,
339
+ 926,
340
+ 790
341
+ ],
342
+ "page_idx": 1
343
+ },
344
+ {
345
+ "type": "equation",
346
+ "text": "\n$$\nq (z \\mid x) = N (\\mu (x), \\sigma^ {2} (x))\n$$\n",
347
+ "text_format": "latex",
348
+ "bbox": [
349
+ 617,
350
+ 800,
351
+ 820,
352
+ 819
353
+ ],
354
+ "page_idx": 1
355
+ },
356
+ {
357
+ "type": "text",
358
+ "text": "And optimize it by reparameterization technique:",
359
+ "bbox": [
360
+ 509,
361
+ 828,
362
+ 833,
363
+ 843
364
+ ],
365
+ "page_idx": 1
366
+ },
367
+ {
368
+ "type": "equation",
369
+ "text": "\n$$\nz = \\mu (x) + \\sigma (x) \\cdot \\varepsilon , \\quad \\varepsilon \\sim \\mathrm {N} (0, \\mathrm {I})\n$$\n",
370
+ "text_format": "latex",
371
+ "bbox": [
372
+ 594,
373
+ 848,
374
+ 841,
375
+ 864
376
+ ],
377
+ "page_idx": 1
378
+ },
379
+ {
380
+ "type": "text",
381
+ "text": "In this way, the model can learn A and B through the neural network to obtain a more stable gradient. In addition, building",
382
+ "bbox": [
383
+ 508,
384
+ 875,
385
+ 926,
386
+ 902
387
+ ],
388
+ "page_idx": 1
389
+ },
390
+ {
391
+ "type": "text",
392
+ "text": "on previous adversarial learning frameworks [16] and generative design concepts [17], we introduce an adversarial learning mechanism to optimize the category distribution. This mechanism strengthens the model's ability to differentiate minority class samples by ensuring that generated data more closely matches real distributions. Specifically, a discriminator is constructed to distinguish the distribution of generated minority class samples from authentic instances, ensuring closer alignment with observed data. Furthermore, incorporating few-shot learning strategies [18] and dynamic adaptation techniques [19] enhances the model's resilience in limited-data conditions. The optimization goal of the discriminator is as follows:",
393
+ "bbox": [
394
+ 66,
395
+ 66,
396
+ 486,
397
+ 247
398
+ ],
399
+ "page_idx": 2
400
+ },
401
+ {
402
+ "type": "equation",
403
+ "text": "\n$$\n\\min _ {G} \\max _ {D} E _ {x \\sim p _ {\\text {d a t a}} (x)} [ \\log D (z) ] +\n$$\n",
404
+ "text_format": "latex",
405
+ "bbox": [
406
+ 156,
407
+ 255,
408
+ 393,
409
+ 280
410
+ ],
411
+ "page_idx": 2
412
+ },
413
+ {
414
+ "type": "equation",
415
+ "text": "\n$$\nE _ {x \\sim q _ {(z | x)}} [ \\log (1 - D (z)) ]\n$$\n",
416
+ "text_format": "latex",
417
+ "bbox": [
418
+ 160,
419
+ 284,
420
+ 331,
421
+ 306
422
+ ],
423
+ "page_idx": 2
424
+ },
425
+ {
426
+ "type": "text",
427
+ "text": "Through this adversarial learning method, the model can capture the characteristics of minority classes more accurately and avoid the overfitting problem of minority class samples.",
428
+ "bbox": [
429
+ 66,
430
+ 313,
431
+ 486,
432
+ 357
433
+ ],
434
+ "page_idx": 2
435
+ },
436
+ {
437
+ "type": "text",
438
+ "text": "In summary, this study combines deep probabilistic graph models, variational reasoning, and adversarial learning methods to optimize imbalanced data classification tasks.",
439
+ "bbox": [
440
+ 66,
441
+ 363,
442
+ 486,
443
+ 405
444
+ ],
445
+ "page_idx": 2
446
+ },
447
+ {
448
+ "type": "text",
449
+ "text": "III. EXPERIMENT",
450
+ "text_level": 1,
451
+ "bbox": [
452
+ 215,
453
+ 415,
454
+ 346,
455
+ 429
456
+ ],
457
+ "page_idx": 2
458
+ },
459
+ {
460
+ "type": "text",
461
+ "text": "A. Dataset",
462
+ "text_level": 1,
463
+ "bbox": [
464
+ 66,
465
+ 436,
466
+ 155,
467
+ 450
468
+ ],
469
+ "page_idx": 2
470
+ },
471
+ {
472
+ "type": "text",
473
+ "text": "This study employs the Kaggle \"Credit Card Fraud Detection\" dataset, consisting of 284,807 credit card transactions from a European institution. Of these transactions, 492 are labeled as fraudulent, indicating a highly imbalanced class distribution (approximately 1:577). Each record contains 30 features, including 28 anonymized components derived from Principal Component Analysis (PCA), along with transaction time and amount. Personally identifiable information has been removed, leaving only numerical features, which were preprocessed through normalization, outlier detection, and data augmentation.",
474
+ "bbox": [
475
+ 66,
476
+ 454,
477
+ 486,
478
+ 608
479
+ ],
480
+ "page_idx": 2
481
+ },
482
+ {
483
+ "type": "text",
484
+ "text": "Given the severe class imbalance, direct application of conventional classification models often leads to bias toward the majority class, compromising fraud detection. To address this challenge, we employed various sampling strategies, including under-sampling, over-sampling, and the Synthetic Minority Over-sampling Technique (SMOTE), to generate synthetic samples for the minority class and improve representation. We also evaluated the impact of different sampling methods on model stability and performance. The dataset was split into $70\\%$ training, $15\\%$ validation, and $15\\%$ test sets. Evaluation metrics included Precision, Recall, F1-score, and the Area Under the Receiver Operating Characteristic Curve (AUC-ROC). Comparative experiments with different data augmentation techniques demonstrated that integrating probabilistic modeling with these strategies substantially enhances fraud detection and reduces false positives, thereby improving the model's reliability in practical applications.",
485
+ "bbox": [
486
+ 66,
487
+ 614,
488
+ 486,
489
+ 864
490
+ ],
491
+ "page_idx": 2
492
+ },
493
+ {
494
+ "type": "text",
495
+ "text": "B. Experiment Result",
496
+ "text_level": 1,
497
+ "bbox": [
498
+ 509,
499
+ 68,
500
+ 666,
501
+ 82
502
+ ],
503
+ "page_idx": 2
504
+ },
505
+ {
506
+ "type": "text",
507
+ "text": "This study primarily compares deep probabilistic graphical models (DPGMs) with several advanced imbalanced data classification methods to validate their effectiveness. First, we select the generative adversarial network (GAN)-based methods, such as WGANGP-SMOTE and ADASYN-GAN, which leverage GANs to synthesize minority class samples and mitigate data imbalance. Second, we evaluate class-adaptive ensemble learning methods, including Balanced Random Forest (BRF) and XGBoost-Cost Sensitive, which enhance minority class learning by adjusting sampling strategies or modifying loss functions. Additionally, we compare attention-based imbalanced classification methods, such as Self-Attention Anomaly Detection (SAAD) and Hierarchical Attention Networks (HAN), which have demonstrated strong anomaly detection capabilities in credit card fraud detection and similar tasks. Through these comparative experiments, we aim to comprehensively assess the advantages of deep probabilistic graphical models in minority class representation learning, generalization ability, and classification performance.",
508
+ "bbox": [
509
+ 506,
510
+ 85,
511
+ 929,
512
+ 377
513
+ ],
514
+ "page_idx": 2
515
+ },
516
+ {
517
+ "type": "table",
518
+ "img_path": "images/ffd677e4e9724b33fd97b088d2b56b347e397c7cd6d17964076bb5ea1b746ab4.jpg",
519
+ "table_caption": [
520
+ "Table 1. Integration Testing 1"
521
+ ],
522
+ "table_footnote": [],
523
+ "table_body": "<table><tr><td>Model</td><td>AUC</td><td>Precision</td><td>Recall</td><td>F1-Score</td></tr><tr><td>GAN [20]</td><td>0.842</td><td>0.716</td><td>0.654</td><td>0.684</td></tr><tr><td>ADASYN [21]</td><td>0.856</td><td>0.729</td><td>0.668</td><td>0.697</td></tr><tr><td>SMOTE [22]</td><td>0.871</td><td>0.742</td><td>0.683</td><td>0.711</td></tr><tr><td>BRF [23]</td><td>0.889</td><td>0.764</td><td>0.721</td><td>0.742</td></tr><tr><td>XGBOOST-Cost [24]</td><td>0.903</td><td>0.779</td><td>0.735</td><td>0.757</td></tr><tr><td>SAAD [25]</td><td>0.915</td><td>0.793</td><td>0.751</td><td>0.771</td></tr><tr><td>HAN [26]</td><td>0.927</td><td>0.806</td><td>0.768</td><td>0.786</td></tr><tr><td>Ours</td><td>0.941</td><td>0.822</td><td>0.785</td><td>0.803</td></tr></table>",
524
+ "bbox": [
525
+ 509,
526
+ 404,
527
+ 929,
528
+ 527
529
+ ],
530
+ "page_idx": 2
531
+ },
532
+ {
533
+ "type": "text",
534
+ "text": "Our proposed deep probabilistic graphical model (DPGM) outperforms all compared methods on every evaluation metric, demonstrating superior generalization in imbalanced classification. With an AUC of 0.941, it clearly surpasses traditional oversampling (e.g., SMOTE, ADASYN) and ensemble methods (e.g., BRF, XGBoost-Cost Sensitive). Importantly, it achieves a Recall of 0.785 while maintaining a Precision of 0.822, reflecting its effectiveness in detecting minority class samples without overly biasing the model.",
535
+ "bbox": [
536
+ 506,
537
+ 529,
538
+ 928,
539
+ 654
540
+ ],
541
+ "page_idx": 2
542
+ },
543
+ {
544
+ "type": "text",
545
+ "text": "DPGMs model the latent distribution of minority samples more effectively than conventional oversampling, thereby reducing overfitting. Compared to attention-based methods (e.g., SAAD, HAN), our model delivers a higher F1-score (0.803 versus 0.786), illustrating the benefits of uncertainty-aware probabilistic modeling. Overall, these results confirm that combining deep probabilistic modeling with variational inference optimizes class distribution and enhances minority class discrimination, offering a robust solution for imbalanced data classification. Figure 2 presents the corresponding loss function trajectory.",
546
+ "bbox": [
547
+ 506,
548
+ 659,
549
+ 929,
550
+ 813
551
+ ],
552
+ "page_idx": 2
553
+ },
554
+ {
555
+ "type": "image",
556
+ "img_path": "images/e739f93a7fbde9cf2a99b06d3897132dc8dbed0714b55865b79655a88af9e655.jpg",
557
+ "image_caption": [
558
+ "Figure 2. Loss function drop graph"
559
+ ],
560
+ "image_footnote": [],
561
+ "bbox": [
562
+ 83,
563
+ 80,
564
+ 470,
565
+ 263
566
+ ],
567
+ "page_idx": 3
568
+ },
569
+ {
570
+ "type": "text",
571
+ "text": "From the loss function decline curve, both the training loss (Train) and test loss (Test) exhibit a clear downward trend during training iterations. This indicates that the model continuously learns features and optimizes parameters to effectively reduce errors. In the initial phase of training (between 5,000 and 25,000 iterations), the loss decreases at the fastest rate, suggesting that the model rapidly learns data representations and significantly improves classification performance. However, as the number of iterations increases, the rate of loss reduction gradually slows down and stabilizes after approximately 125,000 iterations. This trend implies that the model is approaching convergence, where further optimization yields diminishing returns.",
572
+ "bbox": [
573
+ 70,
574
+ 306,
575
+ 485,
576
+ 487
577
+ ],
578
+ "page_idx": 3
579
+ },
580
+ {
581
+ "type": "text",
582
+ "text": "A comparison of the training and test loss curves reveals that the test loss consistently remains lower than the training loss, and both curves follow a similar trajectory. This observation suggests that the model demonstrates good generalization ability without exhibiting significant overfitting. If the training loss were substantially lower than the test loss, it would indicate that the model performs well on training data but struggles to generalize to unseen data. However, the current loss curves do not display such a pattern, implying that the applied regularization strategies and optimization methods effectively mitigate overfitting. Furthermore, the test loss decreases at a rate similar to that of the training loss in the initial stages, further validating the model's stable learning process. Overall, these experimental results confirm that the model successfully optimizes the loss function during training, leading to a substantial reduction in both training and test errors ultimately reaching a relatively low level. This outcome suggests that the chosen training strategy, hyperparameter configuration, and optimization techniques are effective, allowing the model to learn the data distribution efficiently while maintaining strong generalization performance. Additionally, the stabilization of the loss curves indicates that the training process has effectively converged, suggesting that training can be halted or fine-tuned further to ensure optimal performance on the test set. Finally, this paper also gives the T-SNE results after training, as shown in Figure 3.",
583
+ "bbox": [
584
+ 70,
585
+ 494,
586
+ 485,
587
+ 852
588
+ ],
589
+ "page_idx": 3
590
+ },
591
+ {
592
+ "type": "image",
593
+ "img_path": "images/6ad69f33204c10e92da4741df3cc69002914239055daa6956253b03c994af45e.jpg",
594
+ "image_caption": [
595
+ "Figure 3. T-SNE result map after training"
596
+ ],
597
+ "image_footnote": [],
598
+ "bbox": [
599
+ 517,
600
+ 69,
601
+ 947,
602
+ 349
603
+ ],
604
+ "page_idx": 3
605
+ },
606
+ {
607
+ "type": "text",
608
+ "text": "From the T-SNE results, it is evident that after training, the data points form distinct cluster-like distributions in the two-dimensional space, indicating that the model has successfully learned the feature differences between different classes. As observed in the visualization, the two categories (represented in blue and red) are well separated, suggesting that the model has developed strong discriminative capabilities in the high-dimensional feature space. The presence of a clear boundary between the classes demonstrates that the model effectively extracts distinguishing features without causing sample overlap, thereby validating its effectiveness.",
609
+ "bbox": [
610
+ 513,
611
+ 381,
612
+ 926,
613
+ 534
614
+ ],
615
+ "page_idx": 3
616
+ },
617
+ {
618
+ "type": "text",
619
+ "text": "Furthermore, the overall data distribution demonstrates that the T-SNE dimensionality reduction retains intra-class compactness while ensuring inter-class separability. The blue and red data points are well-clustered in distinct regions without significant overlap, indicating that the model effectively distinguishes between different categories in the feature space. Even when dealing with an imbalanced dataset, the model successfully learns the distribution patterns of the minority class.",
620
+ "bbox": [
621
+ 513,
622
+ 541,
623
+ 926,
624
+ 666
625
+ ],
626
+ "page_idx": 3
627
+ },
628
+ {
629
+ "type": "text",
630
+ "text": "However, while the T-SNE results illustrate a clear class separation, further quantitative evaluation is necessary to assess the robustness of the classification boundaries. For instance, if significant distribution shifts occur in certain test data samples, it may indicate that the model is still susceptible to overfitting. Additionally, since T-SNE is a nonlinear dimensionality reduction method, it may exaggerate the separation between classes, meaning that the actual decision boundaries in the high-dimensional space may not be as well-defined as they appear in the visualization. Therefore, a comprehensive evaluation incorporating classification metrics such as Precision, Recall, and AUC is essential to fully validate the model's generalization performance.",
631
+ "bbox": [
632
+ 513,
633
+ 672,
634
+ 926,
635
+ 852
636
+ ],
637
+ "page_idx": 3
638
+ },
639
+ {
640
+ "type": "text",
641
+ "text": "IV. CONCLUSION",
642
+ "text_level": 1,
643
+ "bbox": [
644
+ 215,
645
+ 68,
646
+ 346,
647
+ 80
648
+ ],
649
+ "page_idx": 4
650
+ },
651
+ {
652
+ "type": "text",
653
+ "text": "This study proposes an imbalanced data classification method based on deep probabilistic graphical models (DPGMs) and validates its effectiveness through experiments on a credit card fraud detection dataset. The experimental results demonstrate that the proposed method outperforms traditional oversampling techniques, ensemble learning approaches, and attention-based models in key metrics such as AUC and F1-score, confirming the effectiveness of probabilistic modeling in handling imbalanced classification tasks. By integrating variational inference, class-weight adjustment, and adversarial learning mechanisms, our model more accurately captures the feature distribution of the minority class, enhancing the classifier's discriminative ability while mitigating the overfitting issues commonly observed in traditional methods.",
654
+ "bbox": [
655
+ 66,
656
+ 87,
657
+ 486,
658
+ 282
659
+ ],
660
+ "page_idx": 4
661
+ },
662
+ {
663
+ "type": "text",
664
+ "text": "Despite the promising performance of our approach in imbalanced data classification, several aspects warrant further improvement. For instance, in cases of extreme imbalance, the minority class samples may still provide insufficient information, potentially limiting the model's generalization capability. Additionally, deep probabilistic graphical models involve high computational complexity, requiring extensive sampling and variational inference steps during training, which may impact deployment efficiency. Therefore, future research could focus on optimizing the computational efficiency of probabilistic modeling to enhance the model's adaptability across different data distributions. Several directions can be explored in future research. More efficient Bayesian optimization methods can be investigated to reduce the computational cost of DPGMs, making them applicable to larger-scale imbalanced datasets. Furthermore, in practical applications, federated learning frameworks can be incorporated to enable cross-institutional model training while preserving data privacy, thereby enhancing the applicability of imbalanced classification methods in real-world scenarios.",
665
+ "bbox": [
666
+ 66,
667
+ 287,
668
+ 486,
669
+ 565
670
+ ],
671
+ "page_idx": 4
672
+ },
673
+ {
674
+ "type": "text",
675
+ "text": "REFERENCES",
676
+ "text_level": 1,
677
+ "bbox": [
678
+ 232,
679
+ 574,
680
+ 320,
681
+ 587
682
+ ],
683
+ "page_idx": 4
684
+ },
685
+ {
686
+ "type": "list",
687
+ "sub_type": "ref_text",
688
+ "list_items": [
689
+ "[1] Y. Cheng, Z. Xu, Y. Chen, Y. Wang, Z. Lin and J. Liu, \"A Deep Learning Framework Integrating CNN and BiLSTM for Financial Systemic Risk Analysis and Prediction,\" arXiv preprint arXiv:2502.06847, 2025.",
690
+ "[2] J. Liu, \"Multimodal Data-Driven Factor Models for Stock Market Forecasting,\" Journal of Computer Technology and Software, vol. 4, no. 2, 2025, https://doi.org/10.5281/zenodo.14984969.",
691
+ "[3] Y. Deng, \"A Hybrid Network Congestion Prediction Method Integrating Association Rules and LSTM for Enhanced Spatiotemporal Forecasting,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912727.",
692
+ "[4] P. Feng, \"Hybrid BiLSTM-Transformer Model for Identifying Fraudulent Transactions in Financial Systems,\" Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985026.",
693
+ "[5] X. Du, \"Optimized Convolutional Neural Network for Intelligent Financial Statement Anomaly Detection,\" Journal of Computer Technology and Software, vol. 3, no. 9, pp. 11-15, 2024.",
694
+ "[6] S. Moolchandani, \"Advancing Credit Risk Management: Embracing Probabilistic Graphical Models in Banking,\" International Journal of Science and Research (IJSR), vol. 13, no. 6, pp. 74-80, 2024.",
695
+ "[7] S. Arya, T. Rahman and V. Gogate, \"Learning to Solve the Constrained Most Probable Explanation Task in Probabilistic Graphical Models,\" Proceedings of the 2024 International Conference on Artificial Intelligence and Statistics (AISTATS), PMLR, pp. 2791-2799, 2024."
696
+ ],
697
+ "bbox": [
698
+ 68,
699
+ 592,
700
+ 486,
701
+ 897
702
+ ],
703
+ "page_idx": 4
704
+ },
705
+ {
706
+ "type": "list",
707
+ "sub_type": "ref_text",
708
+ "list_items": [
709
+ "[8] J. Hu, T. An, Z. Yu, J. Du and Y. Luo, \"Contrastive Learning for Cold Start Recommendation with Adaptive Feature Fusion,\" arXiv preprint arXiv:2502.03664, 2025.",
710
+ "[9] J. Zhan, \"Elastic Scheduling of Micro-Modules in Edge Computing Based on LSTM Prediction,\" Journal of Computer Technology and Software, vol. 4, no. 2, 2025, https://doi.org/10.5281/zenodo.14984949.",
711
+ "[10] Q. Sun and S. Duan, \"User Intent Prediction and Response in Human-Computer Interaction via BiLSTM,\" Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985042.",
712
+ "[11] Y. Wang, \"Time-Series Premium Risk Prediction via Bidirectional Transformer,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14955913.",
713
+ "[12] T. Zhou, Z. Xu and J. Du, \"Efficient Market Signal Prediction for Blockchain HFT with Temporal Convolutional Networks,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912719.",
714
+ "[13] X. Wang, \"Data Mining Framework Leveraging Stable Diffusion: A Unified Approach for Classification and Anomaly Detection,\" Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14843181.",
715
+ "[14] X. Sun, \"Dynamic Distributed Scheduling for Data Stream Computing: Balancing Task Delay and Load Efficiency\", Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14785261.",
716
+ "[15] X. Yan, J. Du, L. Wang, Y. Liang, J. Hu and B. Wang, \"The Synergistic Role of Deep Learning and Neural Architecture Search in Advancing Artificial Intelligence,\" Proceedings of the 2024 International Conference on Electronics and Devices, Computational Science (ICEDCS), pp. 452-456, Sep. 2024.",
717
+ "[16] P. Li, \"Improved Transformer for Cross-Domain Knowledge Extraction with Feature Alignment,\" Journal of Computer Science and Software Applications, vol. 5, no. 2, 2024.",
718
+ "[17] S. Wang, R. Zhang and X. Shi, \"Generative UI Design with Diffusion Models: Exploring Automated Interface Creation and Human-Computer Interaction,\" Transactions on Computational and Scientific Methods, vol. 5, no. 3, 2025.",
719
+ "[18] J. Gao, S. Lyu, G. Liu, B. Zhu, H. Zheng and X. Liao, \"A Hybrid Model for Few-Shot Text Classification Using Transfer and Meta-Learning,\" arXiv preprint arXiv:2502.09086, 2025.",
720
+ "[19] Y. Yao, \"Time-Series Nested Reinforcement Learning for Dynamic Risk Control in Nonlinear Financial Markets,\" Transactions on Computational and Scientific Methods, vol. 5, no. 1, 2025, https://doi.org/10.5281/zenodo.14677117.",
721
+ "[20] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville and Y. Bengio, \"Generative Adversarial Nets,\" Proceedings of the 27th Advances in Neural Information Processing Systems (NeurIPS), pp. 1–9, 2014.",
722
+ "[21] H. He and Y. Bai, \"ADASYN: Adaptive Synthetic Sampling Approach for Imbalanced Learning,\" Proceedings of the IEEE International Joint Conference on Neural Networks (IEEE World Congress on Computational Intelligence), pp. 1322-1328, 2008.",
723
+ "[22] N. V. Chawla, K. W. Bowyer, L. O. Hall and W. P. Kegelmeyer, \"SMOTE: Synthetic Minority Over-Sampling Technique,\" Journal of Artificial Intelligence Research, vol. 16, pp. 321-357, 2002.",
724
+ "[23] A. Liaw and M. Wiener, \"Classification and Regression by randomForest,\" R News, vol. 2, no. 3, pp. 18-22, 2002.",
725
+ "[24] T. Chen and C. Guestrin, \"XGBoost: A Scalable Tree Boosting System,\" Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 785-794, 2016.",
726
+ "[25] Y. Zhou and R. C. Paffenroth, \"Self-Attention Anomaly Detection,\" Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 2774-2782, 2019.",
727
+ "[26] Z. Yang, D. Yang, C. Dyer, X. He, A. Smola and E. Hovy, \"Hierarchical Attention Networks for Document Classification,\" Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 1480-1489, 2016."
728
+ ],
729
+ "bbox": [
730
+ 511,
731
+ 66,
732
+ 928,
733
+ 897
734
+ ],
735
+ "page_idx": 4
736
+ }
737
+ ]
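The `content_list.json` just closed above is a flat JSON array of blocks; the fields visible in this diff are `type` (`text`, `equation`, `image`, `table`, `list`), `bbox`, `page_idx`, plus type-specific keys such as `text`, `img_path`, `image_caption`, `table_body`, and `list_items`. A minimal reader for this layout might look as follows — a sketch against only the fields shown in this commit, with an illustrative path:

```python
import json

def load_blocks(path):
    """Read a MinerU content_list.json: a flat array of page blocks."""
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)

def plain_text(blocks):
    """Concatenate extractable text page by page (sorted() is stable,
    so the in-page block order is preserved)."""
    parts = []
    for b in sorted(blocks, key=lambda b: b.get("page_idx", 0)):
        if b["type"] == "text":
            parts.append(b["text"])
        elif b["type"] == "equation":
            parts.append(b["text"].strip())        # LaTeX between $$ ... $$
        elif b["type"] == "list":
            parts.extend(b.get("list_items", []))  # e.g. the reference entries
    return "\n\n".join(parts)

blocks = load_blocks(
    "data/2025/2504_05xxx/2504.05758/"
    "607ad388-a3c2-41a7-ba56-f696cce741af_content_list.json"
)
print(plain_text(blocks)[:500])
```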
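The extracted paper itself (Section II above) specifies a class-weighted ELBO with $q(z \mid x) = N(\mu(x), \sigma^{2}(x))$, the reparameterization $z = \mu(x) + \sigma(x) \cdot \varepsilon$, and weights $w(y_i) = N_{major}/N_{minor}$. The authors' code is not part of this dataset; the PyTorch sketch below is only one plausible reading of those equations (all module and variable names are assumptions, and the adversarial term is omitted):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class WeightedVAEClassifier(nn.Module):
    """Encoder q(z|x) = N(mu(x), sigma^2(x)) feeding a classifier p(y|z)."""
    def __init__(self, d_in: int, d_z: int):
        super().__init__()
        self.enc = nn.Sequential(nn.Linear(d_in, 64), nn.ReLU())
        self.mu = nn.Linear(64, d_z)
        self.log_var = nn.Linear(64, d_z)
        self.clf = nn.Linear(d_z, 1)  # logit of p(y=1 | z)

    def forward(self, x):
        h = self.enc(x)
        mu, log_var = self.mu(h), self.log_var(h)
        eps = torch.randn_like(mu)               # eps ~ N(0, I)
        z = mu + torch.exp(0.5 * log_var) * eps  # z = mu(x) + sigma(x) * eps
        return self.clf(z).squeeze(-1), mu, log_var

def weighted_elbo_loss(logits, y, mu, log_var, w_minor):
    """Negative class-weighted ELBO:
    sum_i w(y_i) [ -E_q[log p(y_i|z)] + KL(q(z|x_i) || N(0, I)) ]."""
    w = 1.0 + (w_minor - 1.0) * y.float()  # w = N_major/N_minor when y = 1
    nll = F.binary_cross_entropy_with_logits(logits, y.float(), reduction="none")
    kl = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp()).sum(dim=1)
    return (w * (nll + kl)).mean()

# For the roughly 1:577 fraud dataset described above, w_minor ~= 577.
```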
data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_model.json ADDED
@@ -0,0 +1,1013 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.131,
7
+ 0.07,
8
+ 0.868,
9
+ 0.138
10
+ ],
11
+ "angle": 0,
12
+ "content": "Addressing Class Imbalance with Probabilistic Graphical Models and Variational Inference"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.132,
18
+ 0.145,
19
+ 0.278,
20
+ 0.191
21
+ ],
22
+ "angle": 0,
23
+ "content": "Yujia Lou \nUniversity of Rochester \nRochester, USA"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.426,
29
+ 0.145,
30
+ 0.574,
31
+ 0.191
32
+ ],
33
+ "angle": 0,
34
+ "content": "Jie Liu \nUniversity of Minnesota \nMinneapolis, USA"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.723,
40
+ 0.145,
41
+ 0.869,
42
+ 0.185
43
+ ],
44
+ "angle": 0,
45
+ "content": "Yuan Sheng Northeastern University Seattle, USA"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.131,
51
+ 0.221,
52
+ 0.276,
53
+ 0.265
54
+ ],
55
+ "angle": 0,
56
+ "content": "Jiawei Wang \nUniversity of California Los Angeles, USA"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.442,
62
+ 0.221,
63
+ 0.556,
64
+ 0.263
65
+ ],
66
+ "angle": 0,
67
+ "content": "Yiwei Zhang Cornell University Ithaca, USA"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.723,
73
+ 0.221,
74
+ 0.869,
75
+ 0.265
76
+ ],
77
+ "angle": 0,
78
+ "content": "Yaokun Ren* Northeastern University Seattle, USA"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.067,
84
+ 0.312,
85
+ 0.489,
86
+ 0.613
87
+ ],
88
+ "angle": 0,
89
+ "content": "Abstract-This study proposes a method for imbalanced data classification based on deep probabilistic graphical models (DPGMs) to solve the problem that traditional methods have insufficient learning ability for minority class samples. To address the classification bias caused by class imbalance, we introduce variational inference optimization probability modeling, which enables the model to adaptively adjust the representation ability of minority classes and combines the class-aware weight adjustment strategy to enhance the classifier's sensitivity to minority classes. In addition, we combine the adversarial learning mechanism to generate minority class samples in the latent space so that the model can better characterize the category boundary in the high-dimensional feature space. The experiment is evaluated on the Kaggle \"Credit Card Fraud Detection\" dataset and compared with a variety of advanced imbalanced classification methods (such as GAN-based sampling, BRF, XGBoost-Cost Sensitive, SAAD, HAN). The results show that the method in this study has achieved the best performance in AUC, Precision, Recall and F1-score indicators, effectively improving the recognition rate of minority classes and reducing the false alarm rate. This method can be widely used in imbalanced classification tasks such as financial fraud detection, medical diagnosis, and anomaly detection, providing a new solution for related research."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.068,
95
+ 0.625,
96
+ 0.48,
97
+ 0.654
98
+ ],
99
+ "angle": 0,
100
+ "content": "Keywords-Deep probabilistic graphical models; Imbalanced data classification; Variational inference; Adversarial learning"
101
+ },
102
+ {
103
+ "type": "title",
104
+ "bbox": [
105
+ 0.211,
106
+ 0.662,
107
+ 0.357,
108
+ 0.675
109
+ ],
110
+ "angle": 0,
111
+ "content": "I. INTRODUCTION"
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.067,
117
+ 0.681,
118
+ 0.487,
119
+ 0.904
120
+ ],
121
+ "angle": 0,
122
+ "content": "In modern data analysis and machine learning research, the issue of data imbalance has emerged as a critical challenge affecting classifier performance. Many real-world classification tasks exhibit highly skewed class distributions, such as rare disease detection in market risk analysis and prediction [1], financial fraud detection [2-4], and cybersecurity anomaly [5] detection. In these scenarios, the number of minority class samples is significantly lower than that of the majority class. Traditional machine learning models often prioritize optimizing overall classification accuracy, leading to a substantially reduced recognition rate for minority class instances. Existing approaches primarily include data-level resampling strategies, algorithm-level cost-sensitive learning, and adaptive training strategies in deep learning. However, these methods still face several challenges in practical applications, such as potential noise introduction in resampling, the need for precise loss"
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.508,
128
+ 0.312,
129
+ 0.929,
130
+ 0.396
131
+ ],
132
+ "angle": 0,
133
+ "content": "weight tuning in cost-sensitive learning, and the limited generalization ability of deep learning models on imbalanced data [6]. Consequently, designing more robust and generalizable classification methods to enhance the performance of imbalanced data classification remains a core research problem."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.508,
139
+ 0.402,
140
+ 0.929,
141
+ 0.694
142
+ ],
143
+ "angle": 0,
144
+ "content": "In recent years, deep probabilistic graphical models (DPGMs) have gained significant attention in machine learning and pattern recognition due to their powerful representation capabilities and uncertainty modeling. Probabilistic graphical models integrate probabilistic statistical theory with graph-based methods, effectively capturing complex dependencies among variables and modeling data uncertainty [7]. Compared to conventional deep neural networks, probabilistic graphical models offer notable advantages in small-sample learning, data sparsity, and uncertainty reasoning. The emergence of Bayesian deep learning, variational inference, and graph neural networks has further strengthened the applicability of DPGMs in addressing data imbalance challenges [8]. By incorporating probabilistic priors and posterior distributions, these models can more accurately characterize minority class data while effectively accounting for uncertainty in decision-making [9], thereby enhancing the classifier's sensitivity to minority class instances. Given this, exploring how to leverage the strengths of deep probabilistic graphical models to develop a more robust imbalanced data classification framework holds both theoretical significance and practical potential."
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.508,
150
+ 0.7,
151
+ 0.93,
152
+ 0.908
153
+ ],
154
+ "angle": 0,
155
+ "content": "The purpose of this study is to investigate the effectiveness of deep probabilistic graphical models (DPGMs) in imbalanced data classification, introducing a novel strategy that emphasizes improved performance on minority class samples. Unlike heuristic methods, this approach leverages the generative capabilities of DPGMs through adaptive probabilistic modeling and structural learning, capturing richer representations of underrepresented samples. Variational inference and Bayesian optimization further refine model parameters, enhancing classification robustness while expanding theoretical insights into DPGMs. Beyond its methodological contributions, the proposed model has notable practical value. In human-computer interaction and the financial sector, user intent prediction methods [10] and time-series risk prediction strategies [11] further underscore how DPGMs can adapt to"
156
+ }
157
+ ],
158
+ [
159
+ {
160
+ "type": "text",
161
+ "bbox": [
162
+ 0.068,
163
+ 0.068,
164
+ 0.487,
165
+ 0.234
166
+ ],
167
+ "angle": 0,
168
+ "content": "diverse data structures and real-time processing requirements. Moreover, the efficient market signal detection approach proposed by Zhou et al. [12] highlights the role of advanced neural architectures in combination with DPGMs for continuous data streams. These complementary research directions illustrate the wide-ranging applicability and potential extensions of deep probabilistic models in various industries and research fields. By integrating probabilistic inference and deep learning, this work not only addresses the challenges of imbalanced classification but also broadens the application of deep probabilistic approaches, ultimately enriching the machine learning toolkit for various critical domains."
169
+ },
170
+ {
171
+ "type": "title",
172
+ "bbox": [
173
+ 0.232,
174
+ 0.244,
175
+ 0.334,
176
+ 0.257
177
+ ],
178
+ "angle": 0,
179
+ "content": "II. METHOD"
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.068,
185
+ 0.265,
186
+ 0.487,
187
+ 0.664
188
+ ],
189
+ "angle": 0,
190
+ "content": "Suppose dataset \\( D = \\{(x_{i},y_{i})\\}_{i = 1}^{N} \\), where \\( x_{i}\\in R^{d} \\) represents input samples and \\( y_{i}\\in \\{0,1\\} \\) represents category labels. Assuming that the ratio of positive and negative samples is seriously unbalanced, that is, \\( |\\{yi = 1\\} | < < |\\{y_i = 0\\} | \\), traditional deep learning methods tend to favor the majority class when optimizing the loss function. Therefore, we introduce deep probabilistic graphical models (DPGMs). By constructing a joint probability distribution and incorporating variational inference techniques inspired by Wang [13], the proposed model adaptively enhances the representation capability for minority class samples, leading to improved classification performance. This approach effectively mitigates the challenges posed by class imbalance, thereby ensuring that minority class samples are accurately and adequately represented within the learned feature space. Additionally, leveraging dynamic distributed scheduling methodologies as discussed by Sun [14] enables efficient handling of data streams, optimizing both task delays and load balancing. Such strategies significantly contribute to maintaining computational efficiency and enhancing the real-time responsiveness of the system. Furthermore, the synergistic integration of deep learning methods and neural architecture search techniques outlined by Yan et al. [15] further refines the adaptive representational adjustments, ensuring the robustness, accuracy, and generalization capabilities of the proposed classification framework. The architecture of the probabilistic graphical model is shown in Figure 1."
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.068,
196
+ 0.669,
197
+ 0.487,
198
+ 0.713
199
+ ],
200
+ "angle": 0,
201
+ "content": "First, we define a hidden variable \\( z \\) to model the potential representation of the input data \\( x \\), and use the Bayesian generation model to describe the data generation process:"
202
+ },
203
+ {
204
+ "type": "equation",
205
+ "bbox": [
206
+ 0.165,
207
+ 0.72,
208
+ 0.414,
209
+ 0.736
210
+ ],
211
+ "angle": 0,
212
+ "content": "\\[\np (x, y, z) = p (y \\mid z) p (z \\mid x) p (x)\n\\]"
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.068,
218
+ 0.745,
219
+ 0.488,
220
+ 0.824
221
+ ],
222
+ "angle": 0,
223
+ "content": "Among them, \\( p(y \\mid z) \\) represents the posterior distribution of the classifier for the latent variable, and \\( p(z \\mid x) \\) represents the prior distribution of the latent variable. Based on this, our goal is to optimize the model parameters by maximizing the marginal log-likelihood:"
224
+ },
225
+ {
226
+ "type": "equation",
227
+ "bbox": [
228
+ 0.153,
229
+ 0.83,
230
+ 0.424,
231
+ 0.855
232
+ ],
233
+ "angle": 0,
234
+ "content": "\\[\n\\log (y \\mid x) = \\log \\int p (y \\mid z) p (z \\mid x) d z\n\\]"
235
+ },
236
+ {
237
+ "type": "image",
238
+ "bbox": [
239
+ 0.547,
240
+ 0.074,
241
+ 0.895,
242
+ 0.285
243
+ ],
244
+ "angle": 0,
245
+ "content": null
246
+ },
247
+ {
248
+ "type": "image_caption",
249
+ "bbox": [
250
+ 0.548,
251
+ 0.304,
252
+ 0.912,
253
+ 0.331
254
+ ],
255
+ "angle": 0,
256
+ "content": "Figure 1. The architecture of the probabilistic graphical model"
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.509,
262
+ 0.339,
263
+ 0.929,
264
+ 0.417
265
+ ],
266
+ "angle": 0,
267
+ "content": "However, this integral is difficult to compute directly, so we use variational inference to approximate the solution. Define a variational distribution \\( q(z|x) \\) to approximate \\( p(z|x) \\), and optimize the model through the evidence lower bound (ELBO):"
268
+ },
269
+ {
270
+ "type": "equation",
271
+ "bbox": [
272
+ 0.511,
273
+ 0.427,
274
+ 0.933,
275
+ 0.447
276
+ ],
277
+ "angle": 0,
278
+ "content": "\\[\n\\log p (y \\mid x) \\geq E _ {q (z \\mid x)} [ \\log p (y \\mid z) ] - D _ {K L} (q (z \\mid x) \\| p (z))\n\\]"
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.509,
284
+ 0.452,
285
+ 0.929,
286
+ 0.553
287
+ ],
288
+ "angle": 0,
289
+ "content": "Among them, \\( D_{KL}(\\cdot \\| \\cdot) \\) represents the Kullback-Leibler divergence, which is used to measure the gap between the approximate distribution and the true posterior distribution. In order to further optimize the classification performance of minority classes, we introduce category-aware variational inference and explicitly enhance the weight of minority class samples in the loss function:"
290
+ },
291
+ {
292
+ "type": "equation",
293
+ "bbox": [
294
+ 0.581,
295
+ 0.563,
296
+ 0.856,
297
+ 0.607
298
+ ],
299
+ "angle": 0,
300
+ "content": "\\[\nL = \\sum_ {i = 1} ^ {N} w (y _ {i}) \\left[ E _ {q (z | x _ {i})} [ \\log p (y _ {i} \\mid z) ] - \\right.\n\\]"
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.509,
306
+ 0.619,
307
+ 0.928,
308
+ 0.653
309
+ ],
310
+ "angle": 0,
311
+ "content": "Among them, \\( w(y_{i}) \\) is the category weight coefficient, and a higher weight is set for minority class samples, for example:"
312
+ },
313
+ {
314
+ "type": "equation",
315
+ "bbox": [
316
+ 0.659,
317
+ 0.661,
318
+ 0.78,
319
+ 0.702
320
+ ],
321
+ "angle": 0,
322
+ "content": "\\[\nw \\left(y _ {i}\\right) = \\frac {N _ {\\text {m a j o r}}}{N _ {\\text {m i n o r}}}\n\\]"
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.509,
328
+ 0.71,
329
+ 0.928,
330
+ 0.74
331
+ ],
332
+ "angle": 0,
333
+ "content": "Where A and B represent the number of samples in the majority class and the minority class, respectively."
334
+ },
335
+ {
336
+ "type": "text",
337
+ "bbox": [
338
+ 0.509,
339
+ 0.745,
340
+ 0.928,
341
+ 0.791
342
+ ],
343
+ "angle": 0,
344
+ "content": "In the specific implementation, we use variational autoencoder (VAE) as the probability generation model, so that \\( p(z \\mid x) \\) obeys the normal distribution:"
345
+ },
346
+ {
347
+ "type": "equation",
348
+ "bbox": [
349
+ 0.618,
350
+ 0.801,
351
+ 0.821,
352
+ 0.82
353
+ ],
354
+ "angle": 0,
355
+ "content": "\\[\nq (z \\mid x) = N (\\mu (x), \\sigma^ {2} (x))\n\\]"
356
+ },
357
+ {
358
+ "type": "text",
359
+ "bbox": [
360
+ 0.51,
361
+ 0.829,
362
+ 0.834,
363
+ 0.844
364
+ ],
365
+ "angle": 0,
366
+ "content": "And optimize it by reparameterization technique:"
367
+ },
368
+ {
369
+ "type": "equation",
370
+ "bbox": [
371
+ 0.596,
372
+ 0.849,
373
+ 0.843,
374
+ 0.866
375
+ ],
376
+ "angle": 0,
377
+ "content": "\\[\nz = \\mu (x) + \\sigma (x) \\cdot \\varepsilon , \\quad \\varepsilon \\sim \\mathrm {N} (0, \\mathrm {I})\n\\]"
378
+ },
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.509,
383
+ 0.875,
384
+ 0.928,
385
+ 0.904
386
+ ],
387
+ "angle": 0,
388
+ "content": "In this way, the model can learn A and B through the neural network to obtain a more stable gradient. In addition, building"
389
+ }
390
+ ],
391
+ [
392
+ {
393
+ "type": "text",
394
+ "bbox": [
395
+ 0.067,
396
+ 0.068,
397
+ 0.487,
398
+ 0.248
399
+ ],
400
+ "angle": 0,
401
+ "content": "on previous adversarial learning frameworks [16] and generative design concepts [17], we introduce an adversarial learning mechanism to optimize the category distribution. This mechanism strengthens the model's ability to differentiate minority class samples by ensuring that generated data more closely matches real distributions. Specifically, a discriminator is constructed to distinguish the distribution of generated minority class samples from authentic instances, ensuring closer alignment with observed data. Furthermore, incorporating few-shot learning strategies [18] and dynamic adaptation techniques [19] enhances the model's resilience in limited-data conditions. The optimization goal of the discriminator is as follows:"
402
+ },
403
+ {
404
+ "type": "equation",
405
+ "bbox": [
406
+ 0.157,
407
+ 0.256,
408
+ 0.395,
409
+ 0.281
410
+ ],
411
+ "angle": 0,
412
+ "content": "\\[\n\\min _ {G} \\max _ {D} E _ {x \\sim p _ {\\text {d a t a}} (x)} [ \\log D (z) ] +\n\\]"
413
+ },
414
+ {
415
+ "type": "equation",
416
+ "bbox": [
417
+ 0.161,
418
+ 0.285,
419
+ 0.332,
420
+ 0.307
421
+ ],
422
+ "angle": 0,
423
+ "content": "\\[\nE _ {x \\sim q _ {(z | x)}} [ \\log (1 - D (z)) ]\n\\]"
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.068,
429
+ 0.314,
430
+ 0.487,
431
+ 0.358
432
+ ],
433
+ "angle": 0,
434
+ "content": "Through this adversarial learning method, the model can capture the characteristics of minority classes more accurately and avoid the overfitting problem of minority class samples."
435
+ },
436
+ {
437
+ "type": "text",
438
+ "bbox": [
439
+ 0.068,
440
+ 0.364,
441
+ 0.487,
442
+ 0.406
443
+ ],
444
+ "angle": 0,
445
+ "content": "In summary, this study combines deep probabilistic graph models, variational reasoning, and adversarial learning methods to optimize imbalanced data classification tasks."
446
+ },
447
+ {
448
+ "type": "title",
449
+ "bbox": [
450
+ 0.217,
451
+ 0.416,
452
+ 0.347,
453
+ 0.43
454
+ ],
455
+ "angle": 0,
456
+ "content": "III. EXPERIMENT"
457
+ },
458
+ {
459
+ "type": "title",
460
+ "bbox": [
461
+ 0.068,
462
+ 0.438,
463
+ 0.156,
464
+ 0.451
465
+ ],
466
+ "angle": 0,
467
+ "content": "A. Dataset"
468
+ },
469
+ {
470
+ "type": "text",
471
+ "bbox": [
472
+ 0.067,
473
+ 0.455,
474
+ 0.488,
475
+ 0.609
476
+ ],
477
+ "angle": 0,
478
+ "content": "This study employs the Kaggle \"Credit Card Fraud Detection\" dataset, consisting of 284,807 credit card transactions from a European institution. Of these transactions, 492 are labeled as fraudulent, indicating a highly imbalanced class distribution (approximately 1:577). Each record contains 30 features, including 28 anonymized components derived from Principal Component Analysis (PCA), along with transaction time and amount. Personally identifiable information has been removed, leaving only numerical features, which were preprocessed through normalization, outlier detection, and data augmentation."
479
+ },
480
+ {
481
+ "type": "text",
482
+ "bbox": [
483
+ 0.067,
484
+ 0.615,
485
+ 0.487,
486
+ 0.865
487
+ ],
488
+ "angle": 0,
489
+ "content": "Given the severe class imbalance, direct application of conventional classification models often leads to bias toward the majority class, compromising fraud detection. To address this challenge, we employed various sampling strategies, including under-sampling, over-sampling, and the Synthetic Minority Over-sampling Technique (SMOTE), to generate synthetic samples for the minority class and improve representation. We also evaluated the impact of different sampling methods on model stability and performance. The dataset was split into \\(70\\%\\) training, \\(15\\%\\) validation, and \\(15\\%\\) test sets. Evaluation metrics included Precision, Recall, F1-score, and the Area Under the Receiver Operating Characteristic Curve (AUC-ROC). Comparative experiments with different data augmentation techniques demonstrated that integrating probabilistic modeling with these strategies substantially enhances fraud detection and reduces false positives, thereby improving the model's reliability in practical applications."
490
+ },
491
+ {
492
+ "type": "title",
493
+ "bbox": [
494
+ 0.51,
495
+ 0.069,
496
+ 0.667,
497
+ 0.083
498
+ ],
499
+ "angle": 0,
500
+ "content": "B. Experiment Result"
501
+ },
502
+ {
503
+ "type": "text",
504
+ "bbox": [
505
+ 0.508,
506
+ 0.086,
507
+ 0.93,
508
+ 0.378
509
+ ],
510
+ "angle": 0,
511
+ "content": "This study primarily compares deep probabilistic graphical models (DPGMs) with several advanced imbalanced data classification methods to validate their effectiveness. First, we select the generative adversarial network (GAN)-based methods, such as WGANGP-SMOTE and ADASYN-GAN, which leverage GANs to synthesize minority class samples and mitigate data imbalance. Second, we evaluate class-adaptive ensemble learning methods, including Balanced Random Forest (BRF) and XGBoost-Cost Sensitive, which enhance minority class learning by adjusting sampling strategies or modifying loss functions. Additionally, we compare attention-based imbalanced classification methods, such as Self-Attention Anomaly Detection (SAAD) and Hierarchical Attention Networks (HAN), which have demonstrated strong anomaly detection capabilities in credit card fraud detection and similar tasks. Through these comparative experiments, we aim to comprehensively assess the advantages of deep probabilistic graphical models in minority class representation learning, generalization ability, and classification performance."
512
+ },
513
+ {
514
+ "type": "table_caption",
515
+ "bbox": [
516
+ 0.619,
517
+ 0.392,
518
+ 0.819,
519
+ 0.405
520
+ ],
521
+ "angle": 0,
522
+ "content": "Table 1. Integration Testing 1"
523
+ },
524
+ {
525
+ "type": "table",
526
+ "bbox": [
527
+ 0.51,
528
+ 0.405,
529
+ 0.93,
530
+ 0.528
531
+ ],
532
+ "angle": 0,
533
+ "content": "<table><tr><td>Model</td><td>AUC</td><td>Precision</td><td>Recall</td><td>F1-Score</td></tr><tr><td>GAN [20]</td><td>0.842</td><td>0.716</td><td>0.654</td><td>0.684</td></tr><tr><td>ADASYN [21]</td><td>0.856</td><td>0.729</td><td>0.668</td><td>0.697</td></tr><tr><td>SMOTE [22]</td><td>0.871</td><td>0.742</td><td>0.683</td><td>0.711</td></tr><tr><td>BRF [23]</td><td>0.889</td><td>0.764</td><td>0.721</td><td>0.742</td></tr><tr><td>XGBOOST-Cost [24]</td><td>0.903</td><td>0.779</td><td>0.735</td><td>0.757</td></tr><tr><td>SAAD [25]</td><td>0.915</td><td>0.793</td><td>0.751</td><td>0.771</td></tr><tr><td>HAN [26]</td><td>0.927</td><td>0.806</td><td>0.768</td><td>0.786</td></tr><tr><td>Ours</td><td>0.941</td><td>0.822</td><td>0.785</td><td>0.803</td></tr></table>"
534
+ },
535
+ {
536
+ "type": "text",
537
+ "bbox": [
538
+ 0.508,
539
+ 0.53,
540
+ 0.929,
541
+ 0.655
542
+ ],
543
+ "angle": 0,
544
+ "content": "Our proposed deep probabilistic graphical model (DPGM) outperforms all compared methods on every evaluation metric, demonstrating superior generalization in imbalanced classification. With an AUC of 0.941, it clearly surpasses traditional oversampling (e.g., SMOTE, ADASYN) and ensemble methods (e.g., BRF, XGBoost-Cost Sensitive). Importantly, it achieves a Recall of 0.785 while maintaining a Precision of 0.822, reflecting its effectiveness in detecting minority class samples without overly biasing the model."
545
+ },
546
+ {
547
+ "type": "text",
548
+ "bbox": [
549
+ 0.508,
550
+ 0.66,
551
+ 0.93,
552
+ 0.814
553
+ ],
554
+ "angle": 0,
555
+ "content": "DPGMs model the latent distribution of minority samples more effectively than conventional oversampling, thereby reducing overfitting. Compared to attention-based methods (e.g., SAAD, HAN), our model delivers a higher F1-score (0.803 versus 0.786), illustrating the benefits of uncertainty-aware probabilistic modeling. Overall, these results confirm that combining deep probabilistic modeling with variational inference optimizes class distribution and enhances minority class discrimination, offering a robust solution for imbalanced data classification. Figure 2 presents the corresponding loss function trajectory."
556
+ }
557
+ ],
558
+ [
559
+ {
560
+ "type": "image",
561
+ "bbox": [
562
+ 0.084,
563
+ 0.082,
564
+ 0.471,
565
+ 0.265
566
+ ],
567
+ "angle": 0,
568
+ "content": null
569
+ },
570
+ {
571
+ "type": "image_caption",
572
+ "bbox": [
573
+ 0.177,
574
+ 0.286,
575
+ 0.404,
576
+ 0.301
577
+ ],
578
+ "angle": 0,
579
+ "content": "Figure 2. Loss function drop graph"
580
+ },
581
+ {
582
+ "type": "text",
583
+ "bbox": [
584
+ 0.071,
585
+ 0.308,
586
+ 0.486,
587
+ 0.488
588
+ ],
589
+ "angle": 0,
590
+ "content": "From the loss function decline curve, both the training loss (Train) and test loss (Test) exhibit a clear downward trend during training iterations. This indicates that the model continuously learns features and optimizes parameters to effectively reduce errors. In the initial phase of training (between 5,000 and 25,000 iterations), the loss decreases at the fastest rate, suggesting that the model rapidly learns data representations and significantly improves classification performance. However, as the number of iterations increases, the rate of loss reduction gradually slows down and stabilizes after approximately 125,000 iterations. This trend implies that the model is approaching convergence, where further optimization yields diminishing returns."
591
+ },
592
+ {
593
+ "type": "text",
594
+ "bbox": [
595
+ 0.071,
596
+ 0.495,
597
+ 0.486,
598
+ 0.853
599
+ ],
600
+ "angle": 0,
601
+ "content": "A comparison of the training and test loss curves reveals that the test loss consistently remains lower than the training loss, and both curves follow a similar trajectory. This observation suggests that the model demonstrates good generalization ability without exhibiting significant overfitting. If the training loss were substantially lower than the test loss, it would indicate that the model performs well on training data but struggles to generalize to unseen data. However, the current loss curves do not display such a pattern, implying that the applied regularization strategies and optimization methods effectively mitigate overfitting. Furthermore, the test loss decreases at a rate similar to that of the training loss in the initial stages, further validating the model's stable learning process. Overall, these experimental results confirm that the model successfully optimizes the loss function during training, leading to a substantial reduction in both training and test errors ultimately reaching a relatively low level. This outcome suggests that the chosen training strategy, hyperparameter configuration, and optimization techniques are effective, allowing the model to learn the data distribution efficiently while maintaining strong generalization performance. Additionally, the stabilization of the loss curves indicates that the training process has effectively converged, suggesting that training can be halted or fine-tuned further to ensure optimal performance on the test set. Finally, this paper also gives the T-SNE results after training, as shown in Figure 3."
602
+ },
603
+ {
604
+ "type": "image",
605
+ "bbox": [
606
+ 0.518,
607
+ 0.07,
608
+ 0.949,
609
+ 0.35
610
+ ],
611
+ "angle": 0,
612
+ "content": null
613
+ },
614
+ {
615
+ "type": "image_caption",
616
+ "bbox": [
617
+ 0.586,
618
+ 0.362,
619
+ 0.856,
620
+ 0.376
621
+ ],
622
+ "angle": 0,
623
+ "content": "Figure 3. T-SNE result map after training"
624
+ },
625
+ {
626
+ "type": "text",
627
+ "bbox": [
628
+ 0.514,
629
+ 0.382,
630
+ 0.928,
631
+ 0.535
632
+ ],
633
+ "angle": 0,
634
+ "content": "From the T-SNE results, it is evident that after training, the data points form distinct cluster-like distributions in the two-dimensional space, indicating that the model has successfully learned the feature differences between different classes. As observed in the visualization, the two categories (represented in blue and red) are well separated, suggesting that the model has developed strong discriminative capabilities in the high-dimensional feature space. The presence of a clear boundary between the classes demonstrates that the model effectively extracts distinguishing features without causing sample overlap, thereby validating its effectiveness."
635
+ },
636
+ {
637
+ "type": "text",
638
+ "bbox": [
639
+ 0.514,
640
+ 0.542,
641
+ 0.928,
642
+ 0.667
643
+ ],
644
+ "angle": 0,
645
+ "content": "Furthermore, the overall data distribution demonstrates that the T-SNE dimensionality reduction retains intra-class compactness while ensuring inter-class separability. The blue and red data points are well-clustered in distinct regions without significant overlap, indicating that the model effectively distinguishes between different categories in the feature space. Even when dealing with an imbalanced dataset, the model successfully learns the distribution patterns of the minority class."
646
+ },
647
+ {
648
+ "type": "text",
649
+ "bbox": [
650
+ 0.514,
651
+ 0.673,
652
+ 0.928,
653
+ 0.853
654
+ ],
655
+ "angle": 0,
656
+ "content": "However, while the T-SNE results illustrate a clear class separation, further quantitative evaluation is necessary to assess the robustness of the classification boundaries. For instance, if significant distribution shifts occur in certain test data samples, it may indicate that the model is still susceptible to overfitting. Additionally, since T-SNE is a nonlinear dimensionality reduction method, it may exaggerate the separation between classes, meaning that the actual decision boundaries in the high-dimensional space may not be as well-defined as they appear in the visualization. Therefore, a comprehensive evaluation incorporating classification metrics such as Precision, Recall, and AUC is essential to fully validate the model's generalization performance."
657
+ }
658
+ ],
659
+ [
660
+ {
661
+ "type": "title",
662
+ "bbox": [
663
+ 0.216,
664
+ 0.069,
665
+ 0.347,
666
+ 0.082
667
+ ],
668
+ "angle": 0,
669
+ "content": "IV. CONCLUSION"
670
+ },
671
+ {
672
+ "type": "text",
673
+ "bbox": [
674
+ 0.067,
675
+ 0.088,
676
+ 0.487,
677
+ 0.283
678
+ ],
679
+ "angle": 0,
680
+ "content": "This study proposes an imbalanced data classification method based on deep probabilistic graphical models (DPGMs) and validates its effectiveness through experiments on a credit card fraud detection dataset. The experimental results demonstrate that the proposed method outperforms traditional oversampling techniques, ensemble learning approaches, and attention-based models in key metrics such as AUC and F1-score, confirming the effectiveness of probabilistic modeling in handling imbalanced classification tasks. By integrating variational inference, class-weight adjustment, and adversarial learning mechanisms, our model more accurately captures the feature distribution of the minority class, enhancing the classifier's discriminative ability while mitigating the overfitting issues commonly observed in traditional methods."
681
+ },
682
+ {
683
+ "type": "text",
684
+ "bbox": [
685
+ 0.067,
686
+ 0.288,
687
+ 0.487,
688
+ 0.566
689
+ ],
690
+ "angle": 0,
691
+ "content": "Despite the promising performance of our approach in imbalanced data classification, several aspects warrant further improvement. For instance, in cases of extreme imbalance, the minority class samples may still provide insufficient information, potentially limiting the model's generalization capability. Additionally, deep probabilistic graphical models involve high computational complexity, requiring extensive sampling and variational inference steps during training, which may impact deployment efficiency. Therefore, future research could focus on optimizing the computational efficiency of probabilistic modeling to enhance the model's adaptability across different data distributions. Several directions can be explored in future research. More efficient Bayesian optimization methods can be investigated to reduce the computational cost of DPGMs, making them applicable to larger-scale imbalanced datasets. Furthermore, in practical applications, federated learning frameworks can be incorporated to enable cross-institutional model training while preserving data privacy, thereby enhancing the applicability of imbalanced classification methods in real-world scenarios."
692
+ },
693
+ {
694
+ "type": "title",
695
+ "bbox": [
696
+ 0.233,
697
+ 0.575,
698
+ 0.321,
699
+ 0.588
700
+ ],
701
+ "angle": 0,
702
+ "content": "REFERENCES"
703
+ },
704
+ {
705
+ "type": "ref_text",
706
+ "bbox": [
707
+ 0.07,
708
+ 0.593,
709
+ 0.486,
710
+ 0.64
711
+ ],
712
+ "angle": 0,
713
+ "content": "[1] Y. Cheng, Z. Xu, Y. Chen, Y. Wang, Z. Lin and J. Liu, \"A Deep Learning Framework Integrating CNN and BiLSTM for Financial Systemic Risk Analysis and Prediction,\" arXiv preprint arXiv:2502.06847, 2025."
714
+ },
715
+ {
716
+ "type": "ref_text",
717
+ "bbox": [
718
+ 0.071,
719
+ 0.642,
720
+ 0.485,
721
+ 0.677
722
+ ],
723
+ "angle": 0,
724
+ "content": "[2] J. Liu, \"Multimodal Data-Driven Factor Models for Stock Market Forecasting,\" Journal of Computer Technology and Software, vol. 4, no. 2, 2025, https://doi.org/10.5281/zenodo.14984969."
725
+ },
726
+ {
727
+ "type": "ref_text",
728
+ "bbox": [
729
+ 0.071,
730
+ 0.679,
731
+ 0.485,
732
+ 0.725
733
+ ],
734
+ "angle": 0,
735
+ "content": "[3] Y. Deng, \"A Hybrid Network Congestion Prediction Method Integrating Association Rules and LSTM for Enhanced Spatiotemporal Forecasting,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912727."
736
+ },
737
+ {
738
+ "type": "ref_text",
739
+ "bbox": [
740
+ 0.071,
741
+ 0.728,
742
+ 0.486,
743
+ 0.773
744
+ ],
745
+ "angle": 0,
746
+ "content": "[4] P. Feng, \"Hybrid BiLSTM-Transformer Model for Identifying Fraudulent Transactions in Financial Systems,\" Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985026."
747
+ },
748
+ {
749
+ "type": "ref_text",
750
+ "bbox": [
751
+ 0.071,
752
+ 0.776,
753
+ 0.485,
754
+ 0.812
755
+ ],
756
+ "angle": 0,
757
+ "content": "[5] X. Du, \"Optimized Convolutional Neural Network for Intelligent Financial Statement Anomaly Detection,\" Journal of Computer Technology and Software, vol. 3, no. 9, pp. 11-15, 2024."
758
+ },
759
+ {
760
+ "type": "ref_text",
761
+ "bbox": [
762
+ 0.071,
763
+ 0.814,
764
+ 0.487,
765
+ 0.849
766
+ ],
767
+ "angle": 0,
768
+ "content": "[6] S. Moolchandani, \"Advancing Credit Risk Management: Embracing Probabilistic Graphical Models in Banking,\" International Journal of Science and Research (IJSR), vol. 13, no. 6, pp. 74-80, 2024."
769
+ },
770
+ {
771
+ "type": "ref_text",
772
+ "bbox": [
773
+ 0.071,
774
+ 0.851,
775
+ 0.485,
776
+ 0.898
777
+ ],
778
+ "angle": 0,
779
+ "content": "[7] S. Arya, T. Rahman and V. Gogate, \"Learning to Solve the Constrained Most Probable Explanation Task in Probabilistic Graphical Models,\" Proceedings of the 2024 International Conference on Artificial Intelligence and Statistics (AISTATS), PMLR, pp. 2791-2799, 2024."
780
+ },
781
+ {
782
+ "type": "list",
783
+ "bbox": [
784
+ 0.07,
785
+ 0.593,
786
+ 0.487,
787
+ 0.898
788
+ ],
789
+ "angle": 0,
790
+ "content": null
791
+ },
792
+ {
793
+ "type": "ref_text",
794
+ "bbox": [
795
+ 0.513,
796
+ 0.067,
797
+ 0.928,
798
+ 0.102
799
+ ],
800
+ "angle": 0,
801
+ "content": "[8] J. Hu, T. An, Z. Yu, J. Du and Y. Luo, \"Contrastive Learning for Cold Start Recommendation with Adaptive Feature Fusion,\" arXiv preprint arXiv:2502.03664, 2025."
802
+ },
803
+ {
804
+ "type": "ref_text",
805
+ "bbox": [
806
+ 0.514,
807
+ 0.104,
808
+ 0.929,
809
+ 0.14
810
+ ],
811
+ "angle": 0,
812
+ "content": "[9] J. Zhan, \"Elastic Scheduling of Micro-Modules in Edge Computing Based on LSTM Prediction,\" Journal of Computer Technology and Software, vol. 4, no. 2, 2025, https://doi.org/10.5281/zenodo.14984949."
813
+ },
814
+ {
815
+ "type": "ref_text",
816
+ "bbox": [
817
+ 0.513,
818
+ 0.142,
819
+ 0.928,
820
+ 0.188
821
+ ],
822
+ "angle": 0,
823
+ "content": "[10] Q. Sun and S. Duan, \"User Intent Prediction and Response in Human-Computer Interaction via BiLSTM,\" Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985042."
824
+ },
825
+ {
826
+ "type": "ref_text",
827
+ "bbox": [
828
+ 0.514,
829
+ 0.19,
830
+ 0.928,
831
+ 0.226
832
+ ],
833
+ "angle": 0,
834
+ "content": "[11] Y. Wang, \"Time-Series Premium Risk Prediction via Bidirectional Transformer,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14955913."
835
+ },
836
+ {
837
+ "type": "ref_text",
838
+ "bbox": [
839
+ 0.514,
840
+ 0.228,
841
+ 0.928,
842
+ 0.274
843
+ ],
844
+ "angle": 0,
845
+ "content": "[12] T. Zhou, Z. Xu and J. Du, \"Efficient Market Signal Prediction for Blockchain HFT with Temporal Convolutional Networks,\" Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912719."
846
+ },
847
+ {
848
+ "type": "ref_text",
849
+ "bbox": [
850
+ 0.514,
851
+ 0.277,
852
+ 0.929,
853
+ 0.323
854
+ ],
855
+ "angle": 0,
856
+ "content": "[13] X. Wang, \"Data Mining Framework Leveraging Stable Diffusion: A Unified Approach for Classification and Anomaly Detection,\" Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14843181."
857
+ },
858
+ {
859
+ "type": "ref_text",
860
+ "bbox": [
861
+ 0.514,
862
+ 0.326,
863
+ 0.928,
864
+ 0.372
865
+ ],
866
+ "angle": 0,
867
+ "content": "[14] X. Sun, \"Dynamic Distributed Scheduling for Data Stream Computing: Balancing Task Delay and Load Efficiency\", Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14785261."
868
+ },
869
+ {
870
+ "type": "ref_text",
871
+ "bbox": [
872
+ 0.514,
873
+ 0.374,
874
+ 0.928,
875
+ 0.432
876
+ ],
877
+ "angle": 0,
878
+ "content": "[15] X. Yan, J. Du, L. Wang, Y. Liang, J. Hu and B. Wang, \"The Synergistic Role of Deep Learning and Neural Architecture Search in Advancing Artificial Intelligence,\" Proceedings of the 2024 International Conference on Electronics and Devices, Computational Science (ICEDCS), pp. 452-456, Sep. 2024."
879
+ },
880
+ {
881
+ "type": "ref_text",
882
+ "bbox": [
883
+ 0.514,
884
+ 0.434,
885
+ 0.928,
886
+ 0.469
887
+ ],
888
+ "angle": 0,
889
+ "content": "[16] P. Li, \"Improved Transformer for Cross-Domain Knowledge Extraction with Feature Alignment,\" Journal of Computer Science and Software Applications, vol. 5, no. 2, 2024."
890
+ },
891
+ {
892
+ "type": "ref_text",
893
+ "bbox": [
894
+ 0.514,
895
+ 0.471,
896
+ 0.929,
897
+ 0.516
898
+ ],
899
+ "angle": 0,
900
+ "content": "[17] S. Wang, R. Zhang and X. Shi, \"Generative UI Design with Diffusion Models: Exploring Automated Interface Creation and Human-Computer Interaction,\" Transactions on Computational and Scientific Methods, vol. 5, no. 3, 2025."
901
+ },
902
+ {
903
+ "type": "ref_text",
904
+ "bbox": [
905
+ 0.514,
906
+ 0.519,
907
+ 0.928,
908
+ 0.555
909
+ ],
910
+ "angle": 0,
911
+ "content": "[18] J. Gao, S. Lyu, G. Liu, B. Zhu, H. Zheng and X. Liao, \"A Hybrid Model for Few-Shot Text Classification Using Transfer and Meta-Learning,\" arXiv preprint arXiv:2502.09086, 2025."
912
+ },
913
+ {
914
+ "type": "ref_text",
915
+ "bbox": [
916
+ 0.514,
917
+ 0.557,
918
+ 0.928,
919
+ 0.604
920
+ ],
921
+ "angle": 0,
922
+ "content": "[19] Y. Yao, \"Time-Series Nested Reinforcement Learning for Dynamic Risk Control in Nonlinear Financial Markets,\" Transactions on Computational and Scientific Methods, vol. 5, no. 1, 2025, https://doi.org/10.5281/zenodo.14677117."
923
+ },
924
+ {
925
+ "type": "ref_text",
926
+ "bbox": [
927
+ 0.514,
928
+ 0.606,
929
+ 0.928,
930
+ 0.652
931
+ ],
932
+ "angle": 0,
933
+ "content": "[20] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville and Y. Bengio, \"Generative Adversarial Nets,\" Proceedings of the 27th Advances in Neural Information Processing Systems (NeurIPS), pp. 1–9, 2014."
934
+ },
935
+ {
936
+ "type": "ref_text",
937
+ "bbox": [
938
+ 0.514,
939
+ 0.654,
940
+ 0.928,
941
+ 0.701
942
+ ],
943
+ "angle": 0,
944
+ "content": "[21] H. He and Y. Bai, \"ADASYN: Adaptive Synthetic Sampling Approach for Imbalanced Learning,\" Proceedings of the IEEE International Joint Conference on Neural Networks (IEEE World Congress on Computational Intelligence), pp. 1322-1328, 2008."
945
+ },
946
+ {
947
+ "type": "ref_text",
948
+ "bbox": [
949
+ 0.514,
950
+ 0.703,
951
+ 0.929,
952
+ 0.739
953
+ ],
954
+ "angle": 0,
955
+ "content": "[22] N. V. Chawla, K. W. Bowyer, L. O. Hall and W. P. Kegelmeyer, \"SMOTE: Synthetic Minority Over-Sampling Technique,\" Journal of Artificial Intelligence Research, vol. 16, pp. 321-357, 2002."
956
+ },
957
+ {
958
+ "type": "ref_text",
959
+ "bbox": [
960
+ 0.514,
961
+ 0.741,
962
+ 0.928,
963
+ 0.765
964
+ ],
965
+ "angle": 0,
966
+ "content": "[23] A. Liaw and M. Wiener, \"Classification and Regression by randomForest,\" R News, vol. 2, no. 3, pp. 18-22, 2002."
967
+ },
968
+ {
969
+ "type": "ref_text",
970
+ "bbox": [
971
+ 0.514,
972
+ 0.766,
973
+ 0.928,
974
+ 0.801
975
+ ],
976
+ "angle": 0,
977
+ "content": "[24] T. Chen and C. Guestrin, \"XGBoost: A Scalable Tree Boosting System,\" Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 785-794, 2016."
978
+ },
979
+ {
980
+ "type": "ref_text",
981
+ "bbox": [
982
+ 0.514,
983
+ 0.804,
984
+ 0.928,
985
+ 0.839
986
+ ],
987
+ "angle": 0,
988
+ "content": "[25] Y. Zhou and R. C. Paffenroth, \"Self-Attention Anomaly Detection,\" Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 2774-2782, 2019."
989
+ },
990
+ {
991
+ "type": "ref_text",
992
+ "bbox": [
993
+ 0.514,
994
+ 0.841,
995
+ 0.928,
996
+ 0.898
997
+ ],
998
+ "angle": 0,
999
+ "content": "[26] Z. Yang, D. Yang, C. Dyer, X. He, A. Smola and E. Hovy, \"Hierarchical Attention Networks for Document Classification,\" Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 1480-1489, 2016."
1000
+ },
1001
+ {
1002
+ "type": "list",
1003
+ "bbox": [
1004
+ 0.513,
1005
+ 0.067,
1006
+ 0.929,
1007
+ 0.898
1008
+ ],
1009
+ "angle": 0,
1010
+ "content": null
1011
+ }
1012
+ ]
1013
+ ]
data/2025/2504_05xxx/2504.05758/607ad388-a3c2-41a7-ba56-f696cce741af_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:47df69e5f480452f7109af4923eed045d1c2782fbab3f638fc73bf575a2ff701
3
+ size 513265
data/2025/2504_05xxx/2504.05758/full.md ADDED
@@ -0,0 +1,173 @@
1
+ # Addressing Class Imbalance with Probabilistic Graphical Models and Variational Inference
2
+
3
+ Yujia Lou
4
+ University of Rochester
5
+ Rochester, USA
6
+
7
+ Jie Liu
8
+ University of Minnesota
9
+ Minneapolis, USA
10
+
11
+ Yuan Sheng Northeastern University Seattle, USA
12
+
13
+ Jiawei Wang
14
+ University of California Los Angeles, USA
15
+
16
+ Yiwei Zhang Cornell University Ithaca, USA
17
+
18
+ Yaokun Ren* Northeastern University Seattle, USA
19
+
20
+ Abstract-This study proposes a method for imbalanced data classification based on deep probabilistic graphical models (DPGMs) to solve the problem that traditional methods have insufficient learning ability for minority class samples. To address the classification bias caused by class imbalance, we introduce variational inference optimization probability modeling, which enables the model to adaptively adjust the representation ability of minority classes and combines the class-aware weight adjustment strategy to enhance the classifier's sensitivity to minority classes. In addition, we combine the adversarial learning mechanism to generate minority class samples in the latent space so that the model can better characterize the category boundary in the high-dimensional feature space. The experiment is evaluated on the Kaggle "Credit Card Fraud Detection" dataset and compared with a variety of advanced imbalanced classification methods (such as GAN-based sampling, BRF, XGBoost-Cost Sensitive, SAAD, HAN). The results show that the method in this study has achieved the best performance in AUC, Precision, Recall and F1-score indicators, effectively improving the recognition rate of minority classes and reducing the false alarm rate. This method can be widely used in imbalanced classification tasks such as financial fraud detection, medical diagnosis, and anomaly detection, providing a new solution for related research.
21
+
22
+ Keywords-Deep probabilistic graphical models; Imbalanced data classification; Variational inference; Adversarial learning
23
+
24
+ # I. INTRODUCTION
25
+
26
+ In modern data analysis and machine learning research, the issue of data imbalance has emerged as a critical challenge affecting classifier performance. Many real-world classification tasks exhibit highly skewed class distributions, such as rare disease detection, market risk analysis and prediction [1], financial fraud detection [2-4], and cybersecurity anomaly detection [5]. In these scenarios, the number of minority class samples is significantly lower than that of the majority class. Traditional machine learning models often prioritize optimizing overall classification accuracy, leading to a substantially reduced recognition rate for minority class instances. Existing approaches primarily include data-level resampling strategies, algorithm-level cost-sensitive learning, and adaptive training strategies in deep learning. However, these methods still face several challenges in practical applications, such as potential noise introduction in resampling, the need for precise loss
27
+
28
+ weight tuning in cost-sensitive learning, and the limited generalization ability of deep learning models on imbalanced data [6]. Consequently, designing more robust and generalizable classification methods to enhance the performance of imbalanced data classification remains a core research problem.
29
+
30
+ In recent years, deep probabilistic graphical models (DPGMs) have gained significant attention in machine learning and pattern recognition due to their powerful representation capabilities and uncertainty modeling. Probabilistic graphical models integrate probabilistic statistical theory with graph-based methods, effectively capturing complex dependencies among variables and modeling data uncertainty [7]. Compared to conventional deep neural networks, probabilistic graphical models offer notable advantages in small-sample learning, data sparsity, and uncertainty reasoning. The emergence of Bayesian deep learning, variational inference, and graph neural networks has further strengthened the applicability of DPGMs in addressing data imbalance challenges [8]. By incorporating probabilistic priors and posterior distributions, these models can more accurately characterize minority class data while effectively accounting for uncertainty in decision-making [9], thereby enhancing the classifier's sensitivity to minority class instances. Given this, exploring how to leverage the strengths of deep probabilistic graphical models to develop a more robust imbalanced data classification framework holds both theoretical significance and practical potential.
31
+
32
+ The purpose of this study is to investigate the effectiveness of deep probabilistic graphical models (DPGMs) in imbalanced data classification, introducing a novel strategy that emphasizes improved performance on minority class samples. Unlike heuristic methods, this approach leverages the generative capabilities of DPGMs through adaptive probabilistic modeling and structural learning, capturing richer representations of underrepresented samples. Variational inference and Bayesian optimization further refine model parameters, enhancing classification robustness while expanding theoretical insights into DPGMs. Beyond its methodological contributions, the proposed model has notable practical value. In human-computer interaction and the financial sector, user intent prediction methods [10] and time-series risk prediction strategies [11] further underscore how DPGMs can adapt to
33
+
34
+ diverse data structures and real-time processing requirements. Moreover, the efficient market signal detection approach proposed by Zhou et al. [12] highlights the role of advanced neural architectures in combination with DPGMs for continuous data streams. These complementary research directions illustrate the wide-ranging applicability and potential extensions of deep probabilistic models in various industries and research fields. By integrating probabilistic inference and deep learning, this work not only addresses the challenges of imbalanced classification but also broadens the application of deep probabilistic approaches, ultimately enriching the machine learning toolkit for various critical domains.
35
+
36
+ # II. METHOD
37
+
38
+ Suppose dataset $D = \{(x_{i},y_{i})\}_{i = 1}^{N}$ , where $x_{i}\in R^{d}$ represents input samples and $y_{i}\in \{0,1\}$ represents category labels. Assuming that the ratio of positive and negative samples is severely imbalanced, that is, $|\{y_{i} = 1\}| \ll |\{y_{i} = 0\}|$ , traditional deep learning methods tend to favor the majority class when optimizing the loss function. Therefore, we introduce deep probabilistic graphical models (DPGMs). By constructing a joint probability distribution and incorporating variational inference techniques inspired by Wang [13], the proposed model adaptively enhances the representation capability for minority class samples, leading to improved classification performance. This approach effectively mitigates the challenges posed by class imbalance, thereby ensuring that minority class samples are accurately and adequately represented within the learned feature space. Additionally, leveraging dynamic distributed scheduling methodologies as discussed by Sun [14] enables efficient handling of data streams, optimizing both task delays and load balancing. Such strategies significantly contribute to maintaining computational efficiency and enhancing the real-time responsiveness of the system. Furthermore, the synergistic integration of deep learning methods and neural architecture search techniques outlined by Yan et al. [15] further refines the adaptive representational adjustments, ensuring the robustness, accuracy, and generalization capabilities of the proposed classification framework. The architecture of the probabilistic graphical model is shown in Figure 1.
39
+
40
+ First, we define a latent variable $z$ to model the latent representation of the input data $x$ , and use a Bayesian generative model to describe the data generation process:
41
+
42
+ $$
43
+ p (x, y, z) = p (y \mid z) p (z \mid x) p (x)
44
+ $$
45
+
46
+ Among them, $p(y \mid z)$ represents the posterior distribution of the classifier for the latent variable, and $p(z \mid x)$ represents the prior distribution of the latent variable. Based on this, our goal is to optimize the model parameters by maximizing the marginal log-likelihood:
47
+
48
+ $$
49
+ \log p (y \mid x) = \log \int p (y \mid z) p (z \mid x) d z
50
+ $$
51
+
52
+ ![](images/c8945917346c994832f059f7db5489e15026b3e9e6b1a93768160bb4b7790869.jpg)
53
+ Figure 1. The architecture of the probabilistic graphical model
54
+
55
+ However, this integral is difficult to compute directly, so we use variational inference to approximate the solution. Define a variational distribution $q(z|x)$ to approximate $p(z|x)$ , and optimize the model through the evidence lower bound (ELBO):
56
+
57
+ $$
58
+ \log p (y \mid x) \geq E _ {q (z \mid x)} [ \log p (y \mid z) ] - D _ {K L} (q (z \mid x) \| p (z))
59
+ $$
60
+
61
+ Among them, $D_{KL}(\cdot \| \cdot)$ represents the Kullback-Leibler divergence, which is used to measure the gap between the approximate distribution and the true posterior distribution. In order to further optimize the classification performance of minority classes, we introduce category-aware variational inference and explicitly enhance the weight of minority class samples in the loss function:
62
+
63
+ $$
64
+ L = \sum_ {i = 1} ^ {N} w (y _ {i}) \left[ E _ {q (z | x _ {i})} [ \log p (y _ {i} \mid z) ] - D _ {K L} (q (z \mid x _ {i}) \| p (z)) \right]
65
+ $$
66
+
67
+ Among them, $w(y_{i})$ is the category weight coefficient, and a higher weight is set for minority class samples, for example:
68
+
69
+ $$
70
+ w \left(y _ {i}\right) = \frac {N _ {\text {major}}}{N _ {\text {minor}}}
71
+ $$
72
+
73
+ Where $N_{\text{major}}$ and $N_{\text{minor}}$ represent the number of samples in the majority class and the minority class, respectively.
74
+
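+ As a concrete illustration, the class-weighted objective above might be implemented as in the following PyTorch sketch, using the Gaussian posterior parameters $\mu$ and $\log\sigma^2$ defined just below. This is our own minimal reconstruction, not the authors' released code; the helper names (`class_weight`, `weighted_elbo`), the unit weight for majority samples, and the use of a single reparameterized sample are assumptions.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def class_weight(y, n_major, n_minor):
+     # w(y_i) = N_major / N_minor for minority samples; majority samples keep
+     # weight 1.0 (an assumption: the paper only gives the minority weight).
+     w = torch.ones_like(y, dtype=torch.float)
+     w[y == 1] = n_major / n_minor
+     return w
+
+ def weighted_elbo(logits, y, mu, logvar, n_major, n_minor):
+     # E_q[log p(y|z)], approximated with the single reparameterized sample z
+     # that produced `logits` (negative BCE equals the log-likelihood).
+     log_py_z = -F.binary_cross_entropy_with_logits(logits, y.float(), reduction="none")
+     # Analytic KL(q(z|x) || N(0, I)) for a diagonal Gaussian posterior.
+     kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
+     w = class_weight(y, n_major, n_minor)
+     # Maximizing the weighted ELBO is minimizing its negative mean.
+     return -(w * (log_py_z - kl)).mean()
+ ```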
75
+ In the specific implementation, we use a variational autoencoder (VAE) as the probabilistic generative model, so that the approximate posterior $q(z \mid x)$ follows a normal distribution:
76
+
77
+ $$
78
+ q (z \mid x) = N (\mu (x), \sigma^ {2} (x))
79
+ $$
80
+
81
+ We then optimize it via the reparameterization trick:
82
+
83
+ $$
84
+ z = \mu (x) + \sigma (x) \cdot \varepsilon , \quad \varepsilon \sim \mathrm {N} (0, \mathrm {I})
85
+ $$
86
+
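+ A minimal encoder sketch for this reparameterized sampling step is given below; it assumes PyTorch, and the class name (`GaussianEncoder`) and layer sizes are illustrative rather than taken from the paper.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class GaussianEncoder(nn.Module):
+     """Amortized q(z|x) = N(mu(x), sigma^2(x)) with reparameterized sampling."""
+     def __init__(self, d_in, d_latent, d_hidden=128):
+         super().__init__()
+         self.body = nn.Sequential(nn.Linear(d_in, d_hidden), nn.ReLU())
+         self.mu = nn.Linear(d_hidden, d_latent)
+         self.logvar = nn.Linear(d_hidden, d_latent)
+
+     def forward(self, x):
+         h = self.body(x)
+         mu, logvar = self.mu(h), self.logvar(h)
+         # z = mu + sigma * eps with eps ~ N(0, I): the noise is external, so
+         # gradients flow through mu and sigma into the encoder weights.
+         eps = torch.randn_like(mu)
+         z = mu + torch.exp(0.5 * logvar) * eps
+         return z, mu, logvar
+ ```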
87
+ In this way, the model can learn $\mu(x)$ and $\sigma(x)$ through the neural network to obtain more stable gradients. In addition, building
88
+
89
+ on previous adversarial learning frameworks [16] and generative design concepts [17], we introduce an adversarial learning mechanism to optimize the category distribution. This mechanism strengthens the model's ability to differentiate minority class samples by ensuring that generated data more closely matches real distributions. Specifically, a discriminator is constructed to distinguish the distribution of generated minority class samples from authentic instances, ensuring closer alignment with observed data. Furthermore, incorporating few-shot learning strategies [18] and dynamic adaptation techniques [19] enhances the model's resilience in limited-data conditions. The optimization goal of the discriminator is as follows:
90
+
91
+ $$
+ \min _ {G} \max _ {D} E _ {x \sim p _ {\text {data}} (x)} [ \log D (z) ] + E _ {x \sim q (z \mid x)} [ \log (1 - D (z)) ]
+ $$
98
+
99
+ Through this adversarial learning method, the model can capture the characteristics of minority classes more accurately and avoid the overfitting problem of minority class samples.
100
+
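+ The min-max objective above can be sketched as a pair of binary cross-entropy losses over latent codes. Again this is a hedged reconstruction under stated assumptions (the discriminator `D` outputs one logit per sample, and the non-saturating generator loss is used), not the authors' implementation.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def discriminator_loss(D, z_real, z_gen):
+     # max_D E[log D(z_real)] + E[log(1 - D(z_gen))], written as a minimization;
+     # generated codes are detached so that only D is updated here.
+     real = F.binary_cross_entropy_with_logits(D(z_real), torch.ones(len(z_real), 1))
+     fake = F.binary_cross_entropy_with_logits(D(z_gen.detach()), torch.zeros(len(z_gen), 1))
+     return real + fake
+
+ def generator_loss(D, z_gen):
+     # The generator side of the min-max game in the common non-saturating
+     # form: push D to label generated minority-class codes as real.
+     return F.binary_cross_entropy_with_logits(D(z_gen), torch.ones(len(z_gen), 1))
+ ```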
101
+ In summary, this study combines deep probabilistic graphical models, variational inference, and adversarial learning to optimize imbalanced data classification tasks.
102
+
103
+ # III. EXPERIMENT
104
+
105
+ # A. Dataset
106
+
107
+ This study employs the Kaggle "Credit Card Fraud Detection" dataset, consisting of 284,807 credit card transactions from a European institution. Of these transactions, 492 are labeled as fraudulent, indicating a highly imbalanced class distribution (approximately 1:577). Each record contains 30 features, including 28 anonymized components derived from Principal Component Analysis (PCA), along with transaction time and amount. Personally identifiable information has been removed, leaving only numerical features, which were preprocessed through normalization, outlier detection, and data augmentation.
108
+
109
+ Given the severe class imbalance, direct application of conventional classification models often leads to bias toward the majority class, compromising fraud detection. To address this challenge, we employed various sampling strategies, including under-sampling, over-sampling, and the Synthetic Minority Over-sampling Technique (SMOTE), to generate synthetic samples for the minority class and improve representation. We also evaluated the impact of different sampling methods on model stability and performance. The dataset was split into $70\%$ training, $15\%$ validation, and $15\%$ test sets. Evaluation metrics included Precision, Recall, F1-score, and the Area Under the Receiver Operating Characteristic Curve (AUC-ROC). Comparative experiments with different data augmentation techniques demonstrated that integrating probabilistic modeling with these strategies substantially enhances fraud detection and reduces false positives, thereby improving the model's reliability in practical applications.
110
+
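+ A sketch of this evaluation protocol, under the assumption that SMOTE is applied to the training split only (standard practice, though not stated explicitly above), could look as follows using scikit-learn and imbalanced-learn:
+
+ ```python
+ import numpy as np
+ from sklearn.model_selection import train_test_split
+ from sklearn.metrics import roc_auc_score, precision_score, recall_score, f1_score
+ from imblearn.over_sampling import SMOTE
+
+ # Stand-in data; in practice X, y come from the Kaggle credit card fraud CSV.
+ rng = np.random.default_rng(0)
+ X = rng.normal(size=(2000, 30))
+ y = (rng.random(2000) < 0.05).astype(int)
+
+ # 70% / 15% / 15% stratified split, as described above.
+ X_train, X_tmp, y_train, y_tmp = train_test_split(X, y, test_size=0.30, stratify=y, random_state=0)
+ X_val, X_test, y_val, y_test = train_test_split(X_tmp, y_tmp, test_size=0.50, stratify=y_tmp, random_state=0)
+
+ # Oversample the minority class on the training split only, leaving the
+ # validation and test distributions untouched.
+ X_train_bal, y_train_bal = SMOTE(random_state=0).fit_resample(X_train, y_train)
+
+ # After fitting a model on (X_train_bal, y_train_bal), evaluate for example:
+ # scores = model.predict_proba(X_test)[:, 1]
+ # preds = (scores >= 0.5).astype(int)
+ # print(roc_auc_score(y_test, scores), precision_score(y_test, preds),
+ #       recall_score(y_test, preds), f1_score(y_test, preds))
+ ```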
111
+ # B. Experiment Result
112
+
113
+ This study primarily compares deep probabilistic graphical models (DPGMs) with several advanced imbalanced data classification methods to validate their effectiveness. First, we select the generative adversarial network (GAN)-based methods, such as WGANGP-SMOTE and ADASYN-GAN, which leverage GANs to synthesize minority class samples and mitigate data imbalance. Second, we evaluate class-adaptive ensemble learning methods, including Balanced Random Forest (BRF) and XGBoost-Cost Sensitive, which enhance minority class learning by adjusting sampling strategies or modifying loss functions. Additionally, we compare attention-based imbalanced classification methods, such as Self-Attention Anomaly Detection (SAAD) and Hierarchical Attention Networks (HAN), which have demonstrated strong anomaly detection capabilities in credit card fraud detection and similar tasks. Through these comparative experiments, we aim to comprehensively assess the advantages of deep probabilistic graphical models in minority class representation learning, generalization ability, and classification performance.
114
+
115
+ Table 1. Integration Testing 1
116
+
117
+ <table><tr><td>Model</td><td>AUC</td><td>Precision</td><td>Recall</td><td>F1-Score</td></tr><tr><td>GAN [20]</td><td>0.842</td><td>0.716</td><td>0.654</td><td>0.684</td></tr><tr><td>ADASYN [21]</td><td>0.856</td><td>0.729</td><td>0.668</td><td>0.697</td></tr><tr><td>SMOTE [22]</td><td>0.871</td><td>0.742</td><td>0.683</td><td>0.711</td></tr><tr><td>BRF [23]</td><td>0.889</td><td>0.764</td><td>0.721</td><td>0.742</td></tr><tr><td>XGBOOST-Cost [24]</td><td>0.903</td><td>0.779</td><td>0.735</td><td>0.757</td></tr><tr><td>SAAD [25]</td><td>0.915</td><td>0.793</td><td>0.751</td><td>0.771</td></tr><tr><td>HAN [26]</td><td>0.927</td><td>0.806</td><td>0.768</td><td>0.786</td></tr><tr><td>Ours</td><td>0.941</td><td>0.822</td><td>0.785</td><td>0.803</td></tr></table>
118
+
119
+ Our proposed deep probabilistic graphical model (DPGM) outperforms all compared methods on every evaluation metric, demonstrating superior generalization in imbalanced classification. With an AUC of 0.941, it clearly surpasses traditional oversampling (e.g., SMOTE, ADASYN) and ensemble methods (e.g., BRF, XGBoost-Cost Sensitive). Importantly, it achieves a Recall of 0.785 while maintaining a Precision of 0.822, reflecting its effectiveness in detecting minority class samples without overly biasing the model.
120
+
121
+ DPGMs model the latent distribution of minority samples more effectively than conventional oversampling, thereby reducing overfitting. Compared to attention-based methods (e.g., SAAD, HAN), our model delivers a higher F1-score (0.803 versus 0.786), illustrating the benefits of uncertainty-aware probabilistic modeling. Overall, these results confirm that combining deep probabilistic modeling with variational inference optimizes class distribution and enhances minority class discrimination, offering a robust solution for imbalanced data classification. Figure 2 presents the corresponding loss function trajectory.
122
+
123
+ ![](images/e739f93a7fbde9cf2a99b06d3897132dc8dbed0714b55865b79655a88af9e655.jpg)
124
+ Figure 2. Loss function drop graph
125
+
126
+ From the loss function decline curve, both the training loss (Train) and test loss (Test) exhibit a clear downward trend during training iterations. This indicates that the model continuously learns features and optimizes parameters to effectively reduce errors. In the initial phase of training (between 5,000 and 25,000 iterations), the loss decreases at the fastest rate, suggesting that the model rapidly learns data representations and significantly improves classification performance. However, as the number of iterations increases, the rate of loss reduction gradually slows down and stabilizes after approximately 125,000 iterations. This trend implies that the model is approaching convergence, where further optimization yields diminishing returns.
127
+
128
+ A comparison of the training and test loss curves reveals that the test loss consistently remains lower than the training loss, and both curves follow a similar trajectory. This observation suggests that the model demonstrates good generalization ability without exhibiting significant overfitting. If the training loss were substantially lower than the test loss, it would indicate that the model performs well on training data but struggles to generalize to unseen data. However, the current loss curves do not display such a pattern, implying that the applied regularization strategies and optimization methods effectively mitigate overfitting. Furthermore, the test loss decreases at a rate similar to that of the training loss in the initial stages, further validating the model's stable learning process. Overall, these experimental results confirm that the model successfully optimizes the loss function during training, leading to a substantial reduction in both training and test errors ultimately reaching a relatively low level. This outcome suggests that the chosen training strategy, hyperparameter configuration, and optimization techniques are effective, allowing the model to learn the data distribution efficiently while maintaining strong generalization performance. Additionally, the stabilization of the loss curves indicates that the training process has effectively converged, suggesting that training can be halted or fine-tuned further to ensure optimal performance on the test set. Finally, this paper also gives the T-SNE results after training, as shown in Figure 3.
129
+
130
+ ![](images/6ad69f33204c10e92da4741df3cc69002914239055daa6956253b03c994af45e.jpg)
131
+ Figure 3. T-SNE result map after training
132
+
133
+ From the T-SNE results, it is evident that after training, the data points form distinct cluster-like distributions in the two-dimensional space, indicating that the model has successfully learned the feature differences between different classes. As observed in the visualization, the two categories (represented in blue and red) are well separated, suggesting that the model has developed strong discriminative capabilities in the high-dimensional feature space. The presence of a clear boundary between the classes demonstrates that the model effectively extracts distinguishing features without causing sample overlap, thereby validating its effectiveness.
134
+
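+ For reference, a t-SNE map like Figure 3 can be produced from the learned latent codes with scikit-learn; the snippet below is illustrative only (random stand-in codes replace the trained encoder's output).
+
+ ```python
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from sklearn.manifold import TSNE
+
+ # Stand-in latent codes and labels; in practice take z from the trained encoder.
+ rng = np.random.default_rng(0)
+ z = rng.normal(size=(500, 16))
+ labels = (rng.random(500) < 0.1).astype(int)
+
+ # Project to 2-D; note t-SNE can exaggerate cluster separation, as cautioned below.
+ z2d = TSNE(n_components=2, perplexity=30, random_state=0).fit_transform(z)
+ plt.scatter(z2d[labels == 0, 0], z2d[labels == 0, 1], s=5, c="tab:blue", label="majority")
+ plt.scatter(z2d[labels == 1, 0], z2d[labels == 1, 1], s=5, c="tab:red", label="minority")
+ plt.legend()
+ plt.show()
+ ```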
135
+ Furthermore, the overall data distribution demonstrates that the T-SNE dimensionality reduction retains intra-class compactness while ensuring inter-class separability. The blue and red data points are well-clustered in distinct regions without significant overlap, indicating that the model effectively distinguishes between different categories in the feature space. Even when dealing with an imbalanced dataset, the model successfully learns the distribution patterns of the minority class.
136
+
137
+ However, while the T-SNE results illustrate a clear class separation, further quantitative evaluation is necessary to assess the robustness of the classification boundaries. For instance, if significant distribution shifts occur in certain test data samples, it may indicate that the model is still susceptible to overfitting. Additionally, since T-SNE is a nonlinear dimensionality reduction method, it may exaggerate the separation between classes, meaning that the actual decision boundaries in the high-dimensional space may not be as well-defined as they appear in the visualization. Therefore, a comprehensive evaluation incorporating classification metrics such as Precision, Recall, and AUC is essential to fully validate the model's generalization performance.
138
+
139
+ # IV. CONCLUSION
140
+
141
+ This study proposes an imbalanced data classification method based on deep probabilistic graphical models (DPGMs) and validates its effectiveness through experiments on a credit card fraud detection dataset. The experimental results demonstrate that the proposed method outperforms traditional oversampling techniques, ensemble learning approaches, and attention-based models in key metrics such as AUC and F1-score, confirming the effectiveness of probabilistic modeling in handling imbalanced classification tasks. By integrating variational inference, class-weight adjustment, and adversarial learning mechanisms, our model more accurately captures the feature distribution of the minority class, enhancing the classifier's discriminative ability while mitigating the overfitting issues commonly observed in traditional methods.
142
+
143
+ Despite the promising performance of our approach in imbalanced data classification, several aspects warrant further improvement. For instance, in cases of extreme imbalance, the minority class samples may still provide insufficient information, potentially limiting the model's generalization capability. Additionally, deep probabilistic graphical models involve high computational complexity, requiring extensive sampling and variational inference steps during training, which may impact deployment efficiency. Therefore, future research could focus on optimizing the computational efficiency of probabilistic modeling to enhance the model's adaptability across different data distributions. Several directions can be explored in future research. More efficient Bayesian optimization methods can be investigated to reduce the computational cost of DPGMs, making them applicable to larger-scale imbalanced datasets. Furthermore, in practical applications, federated learning frameworks can be incorporated to enable cross-institutional model training while preserving data privacy, thereby enhancing the applicability of imbalanced classification methods in real-world scenarios.
144
+
145
+ # REFERENCES
146
+
147
+ [1] Y. Cheng, Z. Xu, Y. Chen, Y. Wang, Z. Lin and J. Liu, "A Deep Learning Framework Integrating CNN and BiLSTM for Financial Systemic Risk Analysis and Prediction," arXiv preprint arXiv:2502.06847, 2025.
148
+ [2] J. Liu, "Multimodal Data-Driven Factor Models for Stock Market Forecasting," Journal of Computer Technology and Software, vol. 4, no. 2, 2025, https://doi.org/10.5281/zenodo.14984969.
149
+ [3] Y. Deng, "A Hybrid Network Congestion Prediction Method Integrating Association Rules and LSTM for Enhanced Spatiotemporal Forecasting," Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912727.
150
+ [4] P. Feng, "Hybrid BiLSTM-Transformer Model for Identifying Fraudulent Transactions in Financial Systems," Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985026.
151
+ [5] X. Du, "Optimized Convolutional Neural Network for Intelligent Financial Statement Anomaly Detection," Journal of Computer Technology and Software, vol. 3, no. 9, pp. 11-15, 2024.
152
+ [6] S. Moolchandani, "Advancing Credit Risk Management: Embracing Probabilistic Graphical Models in Banking," International Journal of Science and Research (IJSR), vol. 13, no. 6, pp. 74-80, 2024.
153
+ [7] S. Arya, T. Rahman and V. Gogate, "Learning to Solve the Constrained Most Probable Explanation Task in Probabilistic Graphical Models," Proceedings of the 2024 International Conference on Artificial Intelligence and Statistics (AISTATS), PMLR, pp. 2791-2799, 2024.
154
+
155
+ [8] J. Hu, T. An, Z. Yu, J. Du and Y. Luo, "Contrastive Learning for Cold Start Recommendation with Adaptive Feature Fusion," arXiv preprint arXiv:2502.03664, 2025.
156
+ [9] J. Zhan, "Elastic Scheduling of Micro-Modules in Edge Computing Based on LSTM Prediction," Journal of Computer Technology and Software, vol. 4, no. 2, 2025, https://doi.org/10.5281/zenodo.14984949.
157
+ [10] Q. Sun and S. Duan, "User Intent Prediction and Response in Human-Computer Interaction via BiLSTM," Journal of Computer Science and Software Applications, vol. 5, no. 3, 2025, https://doi.org/10.5281/zenodo.14985042.
158
+ [11] Y. Wang, "Time-Series Premium Risk Prediction via Bidirectional Transformer," Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14955913.
159
+ [12] T. Zhou, Z. Xu and J. Du, "Efficient Market Signal Prediction for Blockchain HFT with Temporal Convolutional Networks," Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025, https://doi.org/10.5281/zenodo.14912719.
160
+ [13] X. Wang, "Data Mining Framework Leveraging Stable Diffusion: A Unified Approach for Classification and Anomaly Detection," Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14843181.
161
+ [14] X. Sun, "Dynamic Distributed Scheduling for Data Stream Computing: Balancing Task Delay and Load Efficiency", Journal of Computer Technology and Software, vol. 4, no. 1, 2025, https://doi.org/10.5281/zenodo.14785261.
162
+ [15] X. Yan, J. Du, L. Wang, Y. Liang, J. Hu and B. Wang, "The Synergistic Role of Deep Learning and Neural Architecture Search in Advancing Artificial Intelligence," Proceedings of the 2024 International Conference on Electronics and Devices, Computational Science (ICEDCS), pp. 452-456, Sep. 2024.
163
+ [16] P. Li, "Improved Transformer for Cross-Domain Knowledge Extraction with Feature Alignment," Journal of Computer Science and Software Applications, vol. 5, no. 2, 2024.
164
+ [17] S. Wang, R. Zhang and X. Shi, "Generative UI Design with Diffusion Models: Exploring Automated Interface Creation and Human-Computer Interaction," Transactions on Computational and Scientific Methods, vol. 5, no. 3, 2025.
165
+ [18] J. Gao, S. Lyu, G. Liu, B. Zhu, H. Zheng and X. Liao, "A Hybrid Model for Few-Shot Text Classification Using Transfer and Meta-Learning," arXiv preprint arXiv:2502.09086, 2025.
166
+ [19] Y. Yao, "Time-Series Nested Reinforcement Learning for Dynamic Risk Control in Nonlinear Financial Markets," Transactions on Computational and Scientific Methods, vol. 5, no. 1, 2025, https://doi.org/10.5281/zenodo.14677117.
167
+ [20] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville and Y. Bengio, "Generative Adversarial Nets," Advances in Neural Information Processing Systems (NeurIPS), vol. 27, pp. 1-9, 2014.
168
+ [21] H. He and Y. Bai, "ADASYN: Adaptive Synthetic Sampling Approach for Imbalanced Learning," Proceedings of the IEEE International Joint Conference on Neural Networks (IEEE World Congress on Computational Intelligence), pp. 1322-1328, 2008.
169
+ [22] N. V. Chawla, K. W. Bowyer, L. O. Hall and W. P. Kegelmeyer, "SMOTE: Synthetic Minority Over-Sampling Technique," Journal of Artificial Intelligence Research, vol. 16, pp. 321-357, 2002.
170
+ [23] A. Liaw and M. Wiener, "Classification and Regression by randomForest," R News, vol. 2, no. 3, pp. 18-22, 2002.
171
+ [24] T. Chen and C. Guestrin, "XGBoost: A Scalable Tree Boosting System," Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 785-794, 2016.
172
+ [25] Y. Zhou and R. C. Paffenroth, "Self-Attention Anomaly Detection," Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 2774-2782, 2019.
173
+ [26] Z. Yang, D. Yang, C. Dyer, X. He, A. Smola and E. Hovy, "Hierarchical Attention Networks for Document Classification," Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 1480-1489, 2016.
data/2025/2504_05xxx/2504.05758/images/1e94108d7e948cbe2234be390b158b637b2839b2600cdc7b10c6f6f9d8c86757.jpg ADDED

Git LFS Details

  • SHA256: 6ea807c68aa3250bcb7c4dba501088616e42e751e60a4c9ff79ec67ae9b2204a
  • Pointer size: 129 Bytes
  • Size of remote file: 4.26 kB
data/2025/2504_05xxx/2504.05758/images/3c81cf688d23b9a5c258f1a910d4b6375efaa9b4bf011b786cf38f50312871eb.jpg ADDED

Git LFS Details

  • SHA256: 18d640f10b62b03b4a8513adc32f7ac07c7b4eda62ee61147cf7adbdb0c7ba92
  • Pointer size: 129 Bytes
  • Size of remote file: 5.22 kB
data/2025/2504_05xxx/2504.05758/images/45c9c4e23af1f7fcd71e4e1f1c42005c9812a59abe82b5be235e2ec375883ea3.jpg ADDED

Git LFS Details

  • SHA256: cfbab8ed11a1678a8f825d5ee34a16ac88271ba70c79e1d2097bbf0b8c035cc0
  • Pointer size: 129 Bytes
  • Size of remote file: 4.22 kB
data/2025/2504_05xxx/2504.05758/images/4bf722e0285a33bb56a86df84ca351fde95bf2e681d8d0f451530ce70b8ce5ea.jpg ADDED

Git LFS Details

  • SHA256: dd28e2b59de2d57d61351778ed0371cba5681173b428022d3a671abbf49f21ad
  • Pointer size: 129 Bytes
  • Size of remote file: 7.88 kB
data/2025/2504_05xxx/2504.05758/images/57a239a94222df9728fff416dc0f582b2ccc3b70ec254c8b69c33076069bada5.jpg ADDED

Git LFS Details

  • SHA256: 1f42c52f9f8b544e33112afcdf38b5859bd7e4ef72c7f8c14bec504b30dc6a29
  • Pointer size: 129 Bytes
  • Size of remote file: 9.07 kB
data/2025/2504_05xxx/2504.05758/images/6ad69f33204c10e92da4741df3cc69002914239055daa6956253b03c994af45e.jpg ADDED

Git LFS Details

  • SHA256: f1f56a72efe9e28426cd2d9c6043c83248a6a05a786e9687ace68cc83be50e61
  • Pointer size: 130 Bytes
  • Size of remote file: 46.2 kB
data/2025/2504_05xxx/2504.05758/images/93059c0b0200dfbf1341842c23946121be5bcecbebeabde6e3839117f7341335.jpg ADDED

Git LFS Details

  • SHA256: 71ebaeae93154720fb4d8027c3e697338a54a845541ceb57c818d8bea29f014b
  • Pointer size: 129 Bytes
  • Size of remote file: 3.58 kB
data/2025/2504_05xxx/2504.05758/images/acdfe8a5e054f9435eea5074a1a9c3feb17a8c5ca109e1f7b62d7971197424bb.jpg ADDED

Git LFS Details

  • SHA256: 81889ef35c89581181229e8f9d470a9167b2e338b3a8f40ea353bc5fd6ff13c7
  • Pointer size: 129 Bytes
  • Size of remote file: 3.29 kB
data/2025/2504_05xxx/2504.05758/images/bb202b08b189fda03ed9f8a34755cc4f9f244fe15241e298af9a4ebaca3f9733.jpg ADDED

Git LFS Details

  • SHA256: a6a5b059796ccc4a4a502a3a440fc3b8b1472d72fbb69e9c7c71a7e05090c741
  • Pointer size: 129 Bytes
  • Size of remote file: 5.8 kB
data/2025/2504_05xxx/2504.05758/images/c8945917346c994832f059f7db5489e15026b3e9e6b1a93768160bb4b7790869.jpg ADDED

Git LFS Details

  • SHA256: 19bfa36918913fb79f881e34f4041a3410e05352ce92b99633182d1bd77814c9
  • Pointer size: 130 Bytes
  • Size of remote file: 22.6 kB
data/2025/2504_05xxx/2504.05758/images/e52f8ad9e8d735a3ab74ab8773298cc5f69f2a2f31b5ee1d75b171e7cdd13362.jpg ADDED

Git LFS Details

  • SHA256: 071612ed2b5b30b5b58e5b211878dd2ef1b011bae939b60ff27aa4ca6344b269
  • Pointer size: 129 Bytes
  • Size of remote file: 4.93 kB
data/2025/2504_05xxx/2504.05758/images/e739f93a7fbde9cf2a99b06d3897132dc8dbed0714b55865b79655a88af9e655.jpg ADDED

Git LFS Details

  • SHA256: 789d1e670f223ddeb323d2df129cd32faa353832e3b0bfaceb6088181c137e6e
  • Pointer size: 130 Bytes
  • Size of remote file: 25.4 kB
data/2025/2504_05xxx/2504.05758/images/ffd677e4e9724b33fd97b088d2b56b347e397c7cd6d17964076bb5ea1b746ab4.jpg ADDED

Git LFS Details

  • SHA256: bf53ba3483f07cca48303891de6035334483ac87d4d3bee71268f47634d21362
  • Pointer size: 130 Bytes
  • Size of remote file: 47 kB
data/2025/2504_05xxx/2504.05758/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_content_list.json ADDED
@@ -0,0 +1,1200 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "How to Enable LLM with 3D Capacity? A Survey of Spatial Reasoning in LLM",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 98,
8
+ 108,
9
+ 898,
10
+ 131
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Jirong Zha $^{1*}$ , Yuxuan Fan $^{2*}$ , Xiao Yang $^{2}$ , Chen Gao $^{1\\dagger}$ , Xinlei Chen $^{1\\dagger}$",
17
+ "bbox": [
18
+ 192,
19
+ 147,
20
+ 799,
21
+ 167
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{1}$ Tsinghua University",
28
+ "bbox": [
29
+ 413,
30
+ 170,
31
+ 583,
32
+ 188
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "2The Hong Kong University of Science and Technology (Guang Zhou)",
39
+ "bbox": [
40
+ 220,
41
+ 189,
42
+ 777,
43
+ 207
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "zhajirong23@mails.tsinghua.edu.cn, {yfan546, xyang856}@connect.hkust-gz.edu.cn, chgao96@gmail.com, chen.xinlei@sz.tsinghua.edu.cn",
50
+ "bbox": [
51
+ 161,
52
+ 209,
53
+ 836,
54
+ 243
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "Abstract",
61
+ "text_level": 1,
62
+ "bbox": [
63
+ 245,
64
+ 271,
65
+ 323,
66
+ 287
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "3D spatial understanding is essential in real-world applications such as robotics, autonomous vehicles, virtual reality, and medical imaging. Recently, Large Language Models (LLMs), having demonstrated remarkable success across various domains, have been leveraged to enhance 3D understanding tasks, showing potential to surpass traditional computer vision methods. In this survey, we present a comprehensive review of methods integrating LLMs with 3D spatial understanding. We propose a taxonomy that categorizes existing methods into three branches: image-based methods deriving 3D understanding from 2D visual data, point cloud-based methods working directly with 3D representations, and hybrid modality-based methods combining multiple data streams. We systematically review representative methods along these categories, covering data representations, architectural modifications, and training strategies that bridge textual and 3D modalities. Finally, we discuss current limitations, including dataset scarcity and computational challenges, while highlighting promising research directions in spatial perception, multi-modal fusion, and real-world applications.",
73
+ "bbox": [
74
+ 114,
75
+ 297,
76
+ 455,
77
+ 632
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "1 Introduction",
84
+ "text_level": 1,
85
+ "bbox": [
86
+ 83,
87
+ 657,
88
+ 223,
89
+ 672
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "Large Language Models (LLMs) have evolved from basic neural networks to advanced transformer models like BERT [Kenton and Toutanova, 2019] and GPT [Radford, 2018], originally excelling at language tasks by learning from vast text datasets. Recent advancements, however, have extended these models beyond pure linguistic processing to encompass multimodal ability (In this paper, when we refer to LLMs, we specifically mean those that integrate multimodal functions). Their ability to capture complex patterns and relationships [Chen et al., 2024a] now holds promise for spatial reasoning tasks [Ma et al., 2024b]. By applying these enhanced models to challenges such as understanding 3D object relationships and spatial navigation, we open up new opportunities for advancing fields like robotics, computer vision, and augmented reality [Gao et al., 2024].",
96
+ "bbox": [
97
+ 81,
98
+ 680,
99
+ 488,
100
+ 888
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "image",
106
+ "img_path": "images/1c4b8c1a8e39901fceb895fcf642206dfa4b227423055dcf9d75196664ca28d0.jpg",
107
+ "image_caption": [
108
+ "Figure 1: Large Language Models can acquire 3D spatial reasoning capabilities through various input sources including multi-view images, RGB-D images, point clouds, and hybrid modalities, enabling the processing and understanding of three-dimensional information."
109
+ ],
110
+ "image_footnote": [],
111
+ "bbox": [
112
+ 514,
113
+ 271,
114
+ 908,
115
+ 411
116
+ ],
117
+ "page_idx": 0
118
+ },
119
+ {
120
+ "type": "text",
121
+ "text": "At the same time, 3D data and 3D modeling techniques have seen significant developments [Ma et al., 2024c], finding extensive applications in virtual and augmented reality, robotics, autonomous vehicles, gaming, medical imaging, and more. Unlike traditional two-dimensional images, 3D data provides a richer view of objects and environments, capturing essential spatial relationships and geometry. Such information is critical for tasks like scene reconstruction, object manipulation, and autonomous navigation, where merely text-based descriptions or 2D representations may fall short of conveying the necessary depth or spatial context.",
122
+ "bbox": [
123
+ 506,
124
+ 505,
125
+ 913,
126
+ 657
127
+ ],
128
+ "page_idx": 0
129
+ },
130
+ {
131
+ "type": "text",
132
+ "text": "LLMs help Spatial Understanding. Bringing these two fields together—powerful language understanding from LLMs and the spatial realism of 3D data—offers the potential for highly capable, context-aware systems. From a linguistic perspective, real-world descriptions often reference physical arrangement, orientation, or manipulations of objects in space. Text alone can be imprecise or ambiguous about size, shape, or relative positioning unless one can integrate a robust spatial or visual understanding. Consequently, there is growing interest in enhancing LLMs with a \"3D capacity\" that enables them to interpret, reason, and even generate three-dimensional representations based on natural language prompts. Such an integrated approach opens up exciting prospects: robots that can follow language instructions more effectively by grounding their commands in 3D context, architects who quickly prototype 3D layouts from textual descriptions, game design",
133
+ "bbox": [
134
+ 506,
135
+ 666,
136
+ 915,
137
+ 891
138
+ ],
139
+ "page_idx": 0
140
+ },
141
+ {
142
+ "type": "aside_text",
143
+ "text": "arXiv:2504.05786v1 [cs.CV] 8 Apr 2025",
144
+ "bbox": [
145
+ 22,
146
+ 268,
147
+ 60,
148
+ 700
149
+ ],
150
+ "page_idx": 0
151
+ },
152
+ {
153
+ "type": "text",
154
+ "text": "ers who generate immersive environments for narrative-based experiences, and many other creative applications yet to be envisioned.",
155
+ "bbox": [
156
+ 81,
157
+ 68,
158
+ 486,
159
+ 109
160
+ ],
161
+ "page_idx": 1
162
+ },
163
+ {
164
+ "type": "text",
165
+ "text": "Motivation. Although LLMs have been increasingly applied in 3D-related tasks, and Ma et al. [2024b] provided a systematic overview of this field, the rapid advancement of this domain has led to numerous new developments in recent months, necessitating an up-to-date survey that captures these recent breakthroughs. Integrating 3D capacity into LLMs faces several key challenges: (1) the scarcity of high-quality 3D datasets compared to abundant text corpora; (2) the fundamental mismatch between sequential text data and continuous 3D spatial structures, requiring specialized architectural adaptations; and (3) the intensive computational requirements for processing 3D data at scale. While early attempts at combining language and 3D have shown promise, current approaches often remain limited in scope, scalability, and generalization capability. Most existing solutions are domain-specific and lack the broad applicability characteristic of text-based LLMs.",
166
+ "bbox": [
167
+ 81,
168
+ 113,
169
+ 486,
170
+ 335
171
+ ],
172
+ "page_idx": 1
173
+ },
174
+ {
175
+ "type": "text",
176
+ "text": "Contribution. The contributions of this work are summarized in the following three aspects: (1) A structured taxonomy. We provide a timely and comprehensive survey that distinguishes itself from the systematic overview offered by Ma et al. [2024b] by presenting a novel perspective on LLM applications in 3D-related tasks: our work constructs a structured taxonomy that categorizes existing research into three primary groups (Figure 2) and offers a forward-looking analysis of the latest breakthroughs, thereby underscoring our unique contributions and the significance of our approach in advancing the field. (2) A comprehensive review. Building on the proposed taxonomy, we systematically review the current research progress on LLMs for spatial reasoning tasks. (3) Future directions. We highlight the remaining limitations of existing works and suggest potential directions for future research.",
177
+ "bbox": [
178
+ 81,
179
+ 339,
180
+ 486,
181
+ 547
182
+ ],
183
+ "page_idx": 1
184
+ },
185
+ {
186
+ "type": "text",
187
+ "text": "2 Preliminary",
188
+ "text_level": 1,
189
+ "bbox": [
190
+ 83,
191
+ 561,
192
+ 218,
193
+ 578
194
+ ],
195
+ "page_idx": 1
196
+ },
197
+ {
198
+ "type": "text",
199
+ "text": "2.1 Large Language Models",
200
+ "text_level": 1,
201
+ "bbox": [
202
+ 83,
203
+ 580,
204
+ 313,
205
+ 595
206
+ ],
207
+ "page_idx": 1
208
+ },
209
+ {
210
+ "type": "text",
211
+ "text": "Large Language Models (LLMs) have evolved from early word embeddings to context-aware models like BERT [Kenton and Toutanova, 2019]. Generative transformers such as GPT series [Radford, 2018], have further advanced text generation and few-shot learning. However, these models often struggle with spatial reasoning due to their focus on textual patterns, prompting efforts to integrate external spatial knowledge [Fu et al., 2024].",
212
+ "bbox": [
213
+ 81,
214
+ 598,
215
+ 486,
216
+ 709
217
+ ],
218
+ "page_idx": 1
219
+ },
220
+ {
221
+ "type": "text",
222
+ "text": "Vision-Language Models (VLMs) extend LLMs by aligning visual data with text. Early examples like CLIP [Radford et al., 2021] leverage co-attentional architectures and contrastive learning, while later models such as BLIP [Li et al., 2022] refine these techniques with larger datasets. Yet, most VLMs process only 2D data, limiting their ability to capture detailed 3D spatial configurations. Integrating 3D context via depth maps, point clouds, or voxels remains challenging, motivating ongoing research toward more robust spatial intelligence.",
223
+ "bbox": [
224
+ 81,
225
+ 709,
226
+ 486,
227
+ 835
228
+ ],
229
+ "page_idx": 1
230
+ },
231
+ {
232
+ "type": "text",
233
+ "text": "2.2 3D Data Structures",
234
+ "text_level": 1,
235
+ "bbox": [
236
+ 83,
237
+ 843,
238
+ 276,
239
+ 857
240
+ ],
241
+ "page_idx": 1
242
+ },
243
+ {
244
+ "type": "text",
245
+ "text": "3D data has different structures, which are essential for understanding the three-dimensional world, and common methods",
246
+ "bbox": [
247
+ 83,
248
+ 859,
249
+ 486,
250
+ 888
251
+ ],
252
+ "page_idx": 1
253
+ },
254
+ {
255
+ "type": "text",
256
+ "text": "include point clouds, voxel grids, polygonal meshes, neural fields, hybrid representations, and 3D Gaussian splatting. Point clouds represent shapes using discrete points, typically denoted as",
257
+ "bbox": [
258
+ 506,
259
+ 68,
260
+ 913,
261
+ 125
262
+ ],
263
+ "page_idx": 1
264
+ },
265
+ {
266
+ "type": "equation",
267
+ "text": "\n$$\nP = \\left\\{p _ {i} \\in \\mathbb {R} ^ {3} \\mid i = 1, \\dots , N \\right\\},\n$$\n",
268
+ "text_format": "latex",
269
+ "bbox": [
270
+ 604,
271
+ 128,
272
+ 815,
273
+ 148
274
+ ],
275
+ "page_idx": 1
276
+ },
277
+ {
278
+ "type": "text",
279
+ "text": "which are storage-efficient but lack surface topology. Voxel grids partition space into uniform cubes, with each voxel $V(i,j,k)$ storing occupancy or distance values, providing detailed structure at the expense of increased memory usage at higher resolutions. Polygonal meshes compactly encode complex geometries through a set of vertices $\\{v_{i}\\}$ and faces $\\{F_j\\}$ , though their unstructured and non-differentiable nature poses challenges for integration with neural networks. Neural fields offer an implicit approach by modeling 3D shapes as continuous and differentiable functions, such as",
280
+ "bbox": [
281
+ 508,
282
+ 152,
283
+ 911,
284
+ 292
285
+ ],
286
+ "page_idx": 1
287
+ },
288
+ {
289
+ "type": "equation",
290
+ "text": "\n$$\nf _ {\\theta}: \\mathbb {R} ^ {3} \\rightarrow (c, \\sigma),\n$$\n",
291
+ "text_format": "latex",
292
+ "bbox": [
293
+ 650,
294
+ 297,
295
+ 769,
296
+ 314
297
+ ],
298
+ "page_idx": 1
299
+ },
300
+ {
301
+ "type": "text",
302
+ "text": "which maps spatial coordinates to color $c$ and density $\\sigma$ . Hybrid representations combine these neural fields with traditional volumetric methods (e.g., integrating $f_{\\theta}$ with voxel grids) to achieve high-quality, real-time rendering. Meanwhile, 3D Gaussian splatting enhances point clouds by associating each point $p_i$ with a covariance matrix $\\Sigma_i$ and color $c_i$ , efficiently encoding radiance information for rendering. Each method has its unique strengths and trade-offs, making them suitable for different applications in 3D understanding and generation.",
303
+ "bbox": [
304
+ 506,
305
+ 319,
306
+ 913,
307
+ 459
308
+ ],
309
+ "page_idx": 1
310
+ },
311
+ {
312
+ "type": "text",
313
+ "text": "2.3 Proposed taxonomy",
314
+ "text_level": 1,
315
+ "bbox": [
316
+ 508,
317
+ 468,
318
+ 705,
319
+ 484
320
+ ],
321
+ "page_idx": 1
322
+ },
323
+ {
324
+ "type": "text",
325
+ "text": "We propose a taxonomy that classifies 3D-LLM research into three main categories based on input modalities and integration strategies, as shown in Figure 1: Image-based spatial reasoning encompasses approaches that derive 3D understanding from 2D images. This includes multi-view methods that reconstruct 3D scenes, RGB-D images providing explicit depth information, monocular 3D perception inferring depth from single views, and medical imaging applications. While these approaches benefit from readily available image data and existing vision models, they may struggle with occlusions and viewpoint limitations. Point cloud-based spatial reasoning works directly with 3D point cloud data through three alignment strategies: (1) Direct alignment that immediately connects point features with language embeddings, (2) Step-by-step alignment that follows sequential stages to bridge modalities, and (3) Task-specific alignment customized for particular spatial reasoning requirements. These methods maintain geometric fidelity but face challenges in handling unstructured 3D data. Hybrid modality-based spatial reasoning combines multiple data streams through either tightly or loosely coupled architectures. Tightly coupled approaches integrate modalities through shared embeddings or end-to-end training, while loosely coupled methods maintain modular components with defined interfaces between them. This enables leveraging complementary strengths across modalities but increases architectural complexity.",
326
+ "bbox": [
327
+ 506,
328
+ 487,
329
+ 913,
330
+ 847
331
+ ],
332
+ "page_idx": 1
333
+ },
334
+ {
335
+ "type": "text",
336
+ "text": "This taxonomy provides a structured framework for understanding the diverse technical approaches in the field while highlighting the distinct challenges and trade-offs each branch",
337
+ "bbox": [
338
+ 506,
339
+ 845,
340
+ 913,
341
+ 888
342
+ ],
343
+ "page_idx": 1
344
+ },
345
+ {
346
+ "type": "image",
347
+ "img_path": "images/10f4e27138d77cef1e66632497ab60fcb460eb82533892d1e5d74ab2bb75012d.jpg",
348
+ "image_caption": [
349
+ "Figure 2: A Taxonomy of Models for Spatial Reasoning with LLMs: Image-based, Point Cloud-based, and Hybrid Modality-based Approaches and Their Subdivisions."
350
+ ],
351
+ "image_footnote": [],
352
+ "bbox": [
353
+ 93,
354
+ 65,
355
+ 903,
356
+ 321
357
+ ],
358
+ "page_idx": 2
359
+ },
360
+ {
361
+ "type": "image",
362
+ "img_path": "images/f869e9459876e49023165cdcf85439fb6449fd17975fe58ef81ce28fb4e6e702.jpg",
363
+ "image_caption": [
364
+ "Figure 3: An overview of image-based approaches."
365
+ ],
366
+ "image_footnote": [],
367
+ "bbox": [
368
+ 86,
369
+ 378,
370
+ 486,
371
+ 579
372
+ ],
373
+ "page_idx": 2
374
+ },
375
+ {
376
+ "type": "text",
377
+ "text": "must address. Figure 2 presents a detailed breakdown of representative works in each category.",
378
+ "bbox": [
379
+ 81,
380
+ 628,
381
+ 488,
382
+ 657
383
+ ],
384
+ "page_idx": 2
385
+ },
386
+ {
387
+ "type": "text",
388
+ "text": "3 Recent Advances of Spatial Reasoning in LLM",
389
+ "text_level": 1,
390
+ "bbox": [
391
+ 83,
392
+ 672,
393
+ 459,
394
+ 705
395
+ ],
396
+ "page_idx": 2
397
+ },
398
+ {
399
+ "type": "text",
400
+ "text": "3.1 Image-based Spatial Reasoning",
401
+ "text_level": 1,
402
+ "bbox": [
403
+ 83,
404
+ 714,
405
+ 369,
406
+ 732
407
+ ],
408
+ "page_idx": 2
409
+ },
410
+ {
411
+ "type": "text",
412
+ "text": "Image-based spatial reasoning methods can be categorized based on their input modalities: multi-view images, monocular images, RGB-D images, and 3D medical images shown in Figure 3. Each modality offers unique advantages for enhancing 3D understanding in Large Language Models (LLMs). Multi-view images provide spatial data from different perspectives, monocular images extract 3D insights from a single view, RGB-D images incorporate depth information, and 3D medical images address domain-specific challenges in healthcare. These categories highlight the strengths and challenges of each approach in improving spatial reasoning capabilities.",
413
+ "bbox": [
414
+ 81,
415
+ 734,
416
+ 488,
417
+ 888
418
+ ],
419
+ "page_idx": 2
420
+ },
421
+ {
422
+ "type": "text",
423
+ "text": "3.1.1 Multi-view Images as input",
424
+ "text_level": 1,
425
+ "bbox": [
426
+ 508,
427
+ 380,
428
+ 754,
429
+ 395
430
+ ],
431
+ "page_idx": 2
432
+ },
433
+ {
434
+ "type": "text",
435
+ "text": "Several studies explore multi-view images to enhance LLMs' spatial understanding. LLaVA-3D Zhu et al. [2024b] leverages multi-view images and 3D positional embeddings to create 3D Patches, achieving state-of-the-art 3D spatial understanding while maintaining 2D image understanding capabilities. Agent3D-Zero Zhang et al. [2024] utilizes multiple images from different viewpoints, enabling VLMs to perform robust reasoning and understand spatial relationships, achieving zero-shot scene understanding. ShapeLLM Qi et al. [2024a] also uses multi-view image input, with robustness to occlusions. Scene-LLM Fu et al. [2024] uses multi-view images to build 3D feature representations, incorporating scene-level and egocentric 3D information to support interactive planning. SpatialPIN Ma et al. [2024a] enhances VLM's spatial reasoning by decomposing, understanding and reconstructing explicit 3D representations from multi-view images and generalizes to various 3D tasks. LLMI3D Yang et al. [2024] extracts spatially enhanced local features from high-resolution images using CNNs and a depth predictor and uses ViT to obtain tokens from low-resolution images. It employs a spatially enhanced cross-branch attention mechanism to effectively mine spatial local features of objects and uses geometric projection to handle. Extracting multi-view features results in huge computational overhead and ignores the essential geometry and depth information. Additionally, plain texts often lead to ambiguities especially in cluttered and complex 3D environmentsChen et al. [2024c]. ConceptGraphs Gu et al. [2024] proposes a graph-structured representation for 3D scenes that operates with an open vocabulary, which is developed by utilizing 2D foundation models and integrating their outputs into a 3D format through multiview association.",
436
+ "bbox": [
437
+ 506,
438
+ 396,
439
+ 915,
440
+ 825
441
+ ],
442
+ "page_idx": 2
443
+ },
444
+ {
445
+ "type": "text",
446
+ "text": "3.1.2 Monocular Image as input",
447
+ "text_level": 1,
448
+ "bbox": [
449
+ 508,
450
+ 830,
451
+ 748,
452
+ 845
453
+ ],
454
+ "page_idx": 2
455
+ },
456
+ {
457
+ "type": "text",
458
+ "text": "LLMI3D Yang et al. [2024] uses a single 2D image for 3D perception, enhancing performance through spatial local feature mining, 3D query token decoding, and geometry-based",
459
+ "bbox": [
460
+ 506,
461
+ 845,
462
+ 913,
463
+ 888
464
+ ],
465
+ "page_idx": 2
466
+ },
467
+ {
468
+ "type": "text",
469
+ "text": "3D reasoning. It uses a depth predictor and CNN to extract spatial local features and uses learnable 3D query tokens for geometric coordinate regression. It combines black-box networks and white-box projection to address changes in camera focal lengths.",
470
+ "bbox": [
471
+ 81,
472
+ 68,
473
+ 486,
474
+ 138
475
+ ],
476
+ "page_idx": 3
477
+ },
478
+ {
479
+ "type": "text",
480
+ "text": "3.1.3 RGB-D Image as Input",
481
+ "text_level": 1,
482
+ "bbox": [
483
+ 83,
484
+ 145,
485
+ 299,
486
+ 160
487
+ ],
488
+ "page_idx": 3
489
+ },
490
+ {
491
+ "type": "text",
492
+ "text": "Depth is estimated in SpatialPIN Ma et al. [2024a] by ZoeDepth when finding field of view (FOV) through perspective fields and provided for 3D-scene understanding and reconstruction. M3D-LaMed Bai et al. [2024] pre-trains the 3D medical vision encoder with medical image slices along depth and introduces end-to-end tuning to integrate 3D information into LLM.",
493
+ "bbox": [
494
+ 81,
495
+ 161,
496
+ 486,
497
+ 257
498
+ ],
499
+ "page_idx": 3
500
+ },
501
+ {
502
+ "type": "text",
503
+ "text": "3.1.4 3D Medical Image as input",
504
+ "text_level": 1,
505
+ "bbox": [
506
+ 83,
507
+ 265,
508
+ 325,
509
+ 280
510
+ ],
511
+ "page_idx": 3
512
+ },
513
+ {
514
+ "type": "text",
515
+ "text": "Unlike previous research focused on 2D medical images, integrating multi-modal other information such as textual descriptions, M3D-LaMed Bai et al. [2024] is specifically designed for 3D CT images by analyzing spatial features. It demonstrates excellent performance across multiple tasks, including image-text retrieval, report generation, visual question answering, localization, and segmentation. In order to generate radiology reports automatically, a brand-new framework Liu et al. [2024a] is proposed to employs low-resolution (LR) visual tokens as queries to extract information from high-resolution (HR) tokens, ensuring that detailed information is retained across HR volumes while minimizing computational costs by processing only the HR-informed LR visual queries. 3D-CT-GPT Chen et al. [2024b], based medical visual language model, is tailored for the generation of radiology reports from 3D CT scans, with a focus on chest CTs. OpenMEDLab Wang et al. [2024] comprises and publishes a variety of medical foundation models to process multi-modal medical data including Color Fundus Photography (CFP), Optical Coherence Tomography (OCT), endoscopy videos, CT&MR volumes and other pathology images.",
516
+ "bbox": [
517
+ 81,
518
+ 280,
519
+ 488,
520
+ 571
521
+ ],
522
+ "page_idx": 3
523
+ },
524
+ {
525
+ "type": "text",
526
+ "text": "3.1.5 Discussion",
527
+ "text_level": 1,
528
+ "bbox": [
529
+ 83,
530
+ 579,
531
+ 210,
532
+ 592
533
+ ],
534
+ "page_idx": 3
535
+ },
536
+ {
537
+ "type": "text",
538
+ "text": "Image-based spatial reasoning methods offer significant advantages, such as easy data acquisition and integration with pre-trained 2D models. Multi-view images provide rich spatial information, while depth estimation enhances scene understanding. However, challenges remain, including limited depth from single views, scale uncertainty, occlusion, and viewpoint dependency. These methods also face issues with visual hallucinations, generalization to novel scenes, and high computational costs. Future research should focus on improving multi-view integration and depth estimation to address these limitations.",
539
+ "bbox": [
540
+ 81,
541
+ 594,
542
+ 486,
543
+ 746
544
+ ],
545
+ "page_idx": 3
546
+ },
547
+ {
548
+ "type": "text",
549
+ "text": "3.2 Recent Advances of Point Cloud-based Spatial Reasoning",
550
+ "text_level": 1,
551
+ "bbox": [
552
+ 83,
553
+ 757,
554
+ 482,
555
+ 787
556
+ ],
557
+ "page_idx": 3
558
+ },
559
+ {
560
+ "type": "text",
561
+ "text": "As shown in Figure 4, point cloud-based spatial reasoning has advanced significantly in recent years, employing three main alignment methods: Direct, Step-by-step, and Task-specific Alignment. These methods are essential for integrating point cloud data with language models to enable effective spatial reasoning. Direct Alignment establishes immediate connections between point cloud features and language model em",
562
+ "bbox": [
563
+ 81,
564
+ 790,
565
+ 486,
566
+ 888
567
+ ],
568
+ "page_idx": 3
569
+ },
570
+ {
571
+ "type": "image",
572
+ "img_path": "images/01a7846f1deba904180c76b95b69058c7981b82fbb85fd782f363da73b4c3476.jpg",
573
+ "image_caption": [
574
+ "Figure 4: An overview of point cloud-based approaches."
575
+ ],
576
+ "image_footnote": [],
577
+ "bbox": [
578
+ 517,
579
+ 66,
580
+ 905,
581
+ 244
582
+ ],
583
+ "page_idx": 3
584
+ },
585
+ {
586
+ "type": "text",
587
+ "text": "beddings, while Step-by-step Alignment follows a sequential process through multiple stages. Task-specific Alignment is customized for particular spatial reasoning requirements. The choice of method depends on specific application needs and constraints.",
588
+ "bbox": [
589
+ 506,
590
+ 290,
591
+ 911,
592
+ 359
593
+ ],
594
+ "page_idx": 3
595
+ },
596
+ {
597
+ "type": "text",
598
+ "text": "3.2.1 Direct Alignment",
599
+ "text_level": 1,
600
+ "bbox": [
601
+ 508,
602
+ 367,
603
+ 684,
604
+ 381
605
+ ],
606
+ "page_idx": 3
607
+ },
608
+ {
609
+ "type": "text",
610
+ "text": "Direct alignment methods create direct connections between point cloud data and language models. PointCLIP [Zhang et al., 2022] was a pioneer, projecting point clouds into multiview depth maps and using CLIP's pre-trained visual encoder for feature extraction, which was then aligned with textual features through a hand-crafted template. This approach showed promising results in zero-shot and few-shot classification tasks by transferring 2D knowledge to the 3D domain. PointCLIP V2 [Zhu et al., 2023] improved the projection quality with a realistic projection module and used GPT-3 for generating 3D-specific text descriptions, achieving better performance in zero-shot classification, part segmentation, and object detection. Chat-Scene [Huang et al., 2024] introduced object identifiers to facilitate object referencing during user-assistant interactions, representing scenes through object-centric embeddings. PointLLM [Xu et al., 2025] advanced the field by integrating a point cloud encoder with a powerful LLM, effectively fusing geometric, appearance, and linguistic information, and overcoming data scarcity with automated generation. These methods demonstrate the potential for effective 3D point cloud understanding through language models, enabling improved spatial reasoning and human-AI interaction.",
611
+ "bbox": [
612
+ 506,
613
+ 382,
614
+ 913,
615
+ 686
616
+ ],
617
+ "page_idx": 3
618
+ },
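+ As a minimal sketch of the scoring step these direct-alignment methods share (PointCLIP-style zero-shot classification), with encoder outputs stubbed as random features and all shapes hypothetical:
+
+ ```python
+ import numpy as np
+
+ def zero_shot_scores(view_feats, text_feats):
+     """CLIP-style zero-shot scoring over projected depth-map views.
+
+     view_feats: (V, D) features of V rendered depth maps of one point cloud.
+     text_feats: (K, D) features of K class prompts, e.g. "a depth map of a chair".
+     Both are assumed to come from a pre-trained image/text encoder pair.
+     """
+     v = view_feats / np.linalg.norm(view_feats, axis=1, keepdims=True)
+     t = text_feats / np.linalg.norm(text_feats, axis=1, keepdims=True)
+     sims = v @ t.T               # (V, K) cosine similarity per view
+     return sims.mean(axis=0)     # aggregate views; argmax gives the class
+
+ # Hypothetical features: 6 views, 3 candidate classes, 512-dim embeddings.
+ scores = zero_shot_scores(np.random.rand(6, 512), np.random.rand(3, 512))
+ print("predicted class:", int(np.argmax(scores)))
+ ```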
619
+ {
620
+ "type": "text",
621
+ "text": "3.2.2 Step-by-step Alignment",
622
+ "text_level": 1,
623
+ "bbox": [
624
+ 508,
625
+ 693,
626
+ 728,
627
+ 707
628
+ ],
629
+ "page_idx": 3
630
+ },
631
+ {
632
+ "type": "text",
633
+ "text": "Step-by-step alignment has gained popularity in integrating point cloud features with language models. Notable approaches include GPT4Point [Qi et al., 2024b], which uses a Bert-based Point-QFormer for point-text feature alignment, followed by object generation. Grounded 3D-LLMs [Chen et al., 2024d] first aligns 3D scene embeddings with textual descriptions via contrastive pre-training, then fine-tunes with referent tokens. LiDAR-LLMs [Yang et al., 2023] employ a three-stage process: cross-modal alignment, object-centric learning, and high-level instruction fine-tuning. MiniGPT-3D [Tang et al., 2024a] follows a four-stage strategy, from point cloud projection to advanced model enhancements using Mixture of Query Experts. GreenPLM [Tang et al., 2024b] uses",
634
+ "bbox": [
635
+ 506,
636
+ 708,
637
+ 913,
638
+ 888
639
+ ],
640
+ "page_idx": 3
641
+ },
642
+ {
643
+ "type": "image",
644
+ "img_path": "images/6f283c8ccff38019b629c7d3baf89d8f01eae9a6757bd3db846940b8dbae1d64.jpg",
645
+ "image_caption": [
646
+ "Figure 5: An overview of hybrid modality-based approaches."
647
+ ],
648
+ "image_footnote": [],
649
+ "bbox": [
650
+ 86,
651
+ 66,
652
+ 480,
653
+ 200
654
+ ],
655
+ "page_idx": 4
656
+ },
657
+ {
658
+ "type": "text",
659
+ "text": "a three-stage method that aligns a text encoder with an LLM using large text data, followed by point-LLM alignment with 3D data. These step-by-step approaches highlight the gradual improvement of spatial reasoning in 3D contexts, offering valuable insights for future research.",
660
+ "bbox": [
661
+ 81,
662
+ 244,
663
+ 488,
664
+ 315
665
+ ],
666
+ "page_idx": 4
667
+ },
668
+ {
669
+ "type": "text",
670
+ "text": "3.2.3 Task-specific Alignment",
671
+ "text_level": 1,
672
+ "bbox": [
673
+ 83,
674
+ 320,
675
+ 303,
676
+ 334
677
+ ],
678
+ "page_idx": 4
679
+ },
680
+ {
681
+ "type": "text",
682
+ "text": "Task-specific alignment customizes models for specific spatial reasoning tasks to improve performance and generalization. SceneVerse [Jia et al., 2024] introduces a large 3D vision-language dataset and Grounded Pre-training for Scenes (GPS), using multi-level contrastive alignment for unified scene-text alignment, achieving state-of-the-art results in tasks like 3D visual grounding and question answering. LL3DA [Chen et al., 2024c] presents a dialogue system that integrates textual instructions and visual interactions, excelling in complex 3D environments. Chat-3D [Wang et al., 2023] proposes a three-stage training scheme to align 3D scene representations with language models, capturing spatial relations with limited data. VisProg [Yuan et al., 2024] introduces visual programming for zero-shot open-vocabulary 3D grounding, leveraging LLMs to generate and execute programmatic representations. These task-specific approaches highlight the importance of adapting models to complex spatial relationships, enabling robust performance even with limited data or zero-shot tasks.",
683
+ "bbox": [
684
+ 81,
685
+ 334,
686
+ 488,
687
+ 583
688
+ ],
689
+ "page_idx": 4
690
+ },
691
+ {
692
+ "type": "text",
693
+ "text": "3.2.4 Discussion",
694
+ "text_level": 1,
695
+ "bbox": [
696
+ 83,
697
+ 590,
698
+ 210,
699
+ 603
700
+ ],
701
+ "page_idx": 4
702
+ },
703
+ {
704
+ "type": "text",
705
+ "text": "The three alignment approaches—Direct, Step-by-step, and Task-specific—each have distinct strengths and challenges. Direct alignment offers efficiency and quick results but struggles with complex spatial relationships. Step-by-step alignment improves feature integration at the cost of higher computational resources and training time. Task-specific alignment excels in specialized tasks but may lack broader applicability.",
706
+ "bbox": [
707
+ 81,
708
+ 604,
709
+ 488,
710
+ 704
711
+ ],
712
+ "page_idx": 4
713
+ },
714
+ {
715
+ "type": "text",
716
+ "text": "3.3 Hybrid Modality-based Spatial Reasoning",
717
+ "text_level": 1,
718
+ "bbox": [
719
+ 83,
720
+ 710,
721
+ 449,
722
+ 726
723
+ ],
724
+ "page_idx": 4
725
+ },
726
+ {
727
+ "type": "text",
728
+ "text": "Hybrid modality-based spatial reasoning integrates point clouds, images, and LLMs through Tightly Coupled and Loosely Coupled approaches, as shown in Figure 5. The Tightly Coupled approach fosters close integration, enabling seamless interaction and high performance, while the Loosely Coupled approach promotes modularity, allowing independent operation of components for greater scalability and flexibility at the cost of reduced real-time interaction.",
729
+ "bbox": [
730
+ 81,
731
+ 728,
732
+ 486,
733
+ 839
734
+ ],
735
+ "page_idx": 4
736
+ },
737
+ {
738
+ "type": "text",
739
+ "text": "3.3.1 Tightly Coupled",
740
+ "text_level": 1,
741
+ "bbox": [
742
+ 83,
743
+ 845,
744
+ 251,
745
+ 859
746
+ ],
747
+ "page_idx": 4
748
+ },
749
+ {
750
+ "type": "text",
751
+ "text": "Several recent works have explored tightly integrated approaches for spatial reasoning across point clouds, images and",
752
+ "bbox": [
753
+ 81,
754
+ 859,
755
+ 488,
756
+ 888
757
+ ],
758
+ "page_idx": 4
759
+ },
760
+ {
761
+ "type": "text",
762
+ "text": "language modalities: Point-Bind [Guo et al., 2023] proposes a joint embedding space to align point clouds with images and text through contrastive learning. It leverages ImageBind to construct unified representations that enable tasks like zero-shot classification, open-world understanding and multi-modal generation. The tight coupling allows Point-Bind to reason about point clouds using both visual and linguistic cues. JM3D [Ji et al., 2024] introduces a Structured Multimodal Organizer that tightly fuses multi-view images and hierarchical text trees with point clouds. This coupled architecture enables detailed spatial understanding by leveraging complementary information across modalities. The Joint Multi-modal Alignment further enhances the synergistic relationships between visual and linguistic features. Uni3D [Zhou et al., 2023] employs a unified transformer architecture that directly aligns point cloud features with image-text representations. By tightly coupling the modalities through end-to-end training, it achieves strong performance on tasks like zero-shot classification and open-world understanding. The shared backbone enables efficient scaling to billion-parameter models. Uni3D-LLM [Liu et al., 2024b] extends this tight coupling to LLMs through an LLM-to-Generator mapping block. This enables unified perception, generation and editing of point clouds guided by natural language. The tight integration allows leveraging rich semantic knowledge from LLMs while maintaining high-quality 3D understanding.",
763
+ "bbox": [
764
+ 506,
765
+ 68,
766
+ 915,
767
+ 430
768
+ ],
769
+ "page_idx": 4
770
+ },
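+ As a minimal sketch of the contrastive alignment objective underlying such joint embedding spaces (a symmetric InfoNCE over in-batch negatives; batch size, dimensions, and temperature are hypothetical):
+
+ ```python
+ import numpy as np
+
+ def info_nce(point_emb, text_emb, temperature=0.07):
+     """Symmetric InfoNCE over a batch of paired point/text embeddings.
+
+     Row i of each (B, D) matrix is a positive pair; all other rows in the
+     batch serve as negatives.
+     """
+     p = point_emb / np.linalg.norm(point_emb, axis=1, keepdims=True)
+     t = text_emb / np.linalg.norm(text_emb, axis=1, keepdims=True)
+     logits = p @ t.T / temperature                    # (B, B) similarities
+     idx = np.arange(len(p))
+     log_sm = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
+     log_sm_t = logits.T - np.log(np.exp(logits.T).sum(axis=1, keepdims=True))
+     return -(log_sm[idx, idx].mean() + log_sm_t[idx, idx].mean()) / 2.0
+
+ loss = info_nce(np.random.randn(8, 256), np.random.randn(8, 256))
+ print(f"contrastive loss: {loss:.3f}")
+ ```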
771
+ {
772
+ "type": "text",
773
+ "text": "3.3.2 Loosely Coupled",
774
+ "text_level": 1,
775
+ "bbox": [
776
+ 509,
777
+ 435,
778
+ 681,
779
+ 450
780
+ ],
781
+ "page_idx": 4
782
+ },
783
+ {
784
+ "type": "text",
785
+ "text": "Loosely coupled approaches maintain greater independence between different modalities while still enabling interaction through well-defined interfaces. MultiPLY [Hong et al., 2024] proposes a multisensory embodied LLM that handles multiple input modalities (visual, audio, tactile, thermal) through separate encoders. The modalities are processed independently and communicate through action tokens and state tokens. This decoupled design allows the system to process each modality with specialized encoders optimized for that data type, while enabling scalability and modularity in the system architecture. Similarly, UniPoint-LLM [Liu et al.] introduces a Multimodal Universal Token Space (MUTS) that loosely connects point clouds and images through independent encoders and a shared mapping layer. This modular design allows easy integration of new modalities and simplified training by only requiring alignment between new modalities and text, rather than pairwise alignment between all modalities. The main benefits of loosely coupled architectures include greater modularity and flexibility in system design, easier integration of new modalities, and independent scaling of different components. However, this approach may result in less optimal joint representation learning, reduced real-time interaction capabilities, and potential information loss between modalities compared to tightly coupled approaches.",
786
+ "bbox": [
787
+ 506,
788
+ 450,
789
+ 913,
790
+ 784
791
+ ],
792
+ "page_idx": 4
793
+ },
794
+ {
795
+ "type": "text",
796
+ "text": "3.3.3 Discussion",
797
+ "text_level": 1,
798
+ "bbox": [
799
+ 509,
800
+ 790,
801
+ 637,
802
+ 803
803
+ ],
804
+ "page_idx": 4
805
+ },
806
+ {
807
+ "type": "text",
808
+ "text": "The choice between tightly and loosely coupled approaches presents important tradeoffs in multimodal spatial reasoning systems. Tightly coupled approaches like Point-Bind and JM3D offer stronger joint representation learning and real-time interaction capabilities through end-to-end training and shared feature spaces. This makes them particularly suitable",
809
+ "bbox": [
810
+ 506,
811
+ 805,
812
+ 913,
813
+ 888
814
+ ],
815
+ "page_idx": 4
816
+ },
817
+ {
818
+ "type": "table",
819
+ "img_path": "images/d940e65bdb4924227a21274aee6d75d1ec4fbdde1933224aff9d92e6f49a75fb.jpg",
820
+ "table_caption": [],
821
+ "table_footnote": [],
822
+ "table_body": "<table><tr><td></td><td>Model</td><td>Data Source</td><td>Alignment Type</td><td>Pre-training</td><td>Fine-tuning</td><td>Task</td><td>Code</td></tr><tr><td rowspan=\"11\">Image - based</td><td>LLaVA-3D [Zhu et al., 2024b]</td><td>Multi-view Images</td><td>-</td><td>✓</td><td>✓</td><td>3D VQA, 3D Scene Understanding</td><td>code</td></tr><tr><td>Agent3D-Zero [Zhang et al., 2024]</td><td>Multi-view Images</td><td>-</td><td>✓</td><td>✗</td><td>3D VQA, 3D Semantic Segmentation</td><td>✗</td></tr><tr><td>ShapeLLM [Qi et al., 2024a]</td><td>Multi-view Images</td><td>-</td><td>✓</td><td>✓</td><td>3D Object Classification, 3D Scene Captioning</td><td>code</td></tr><tr><td>Scene-LLM [Fu et al., 2024]</td><td>Multi-view Images</td><td>-</td><td>✓</td><td>✓</td><td>3D VQA, Dense Captioning</td><td>✗</td></tr><tr><td>SpatialPIN [Ma et al., 2024a]</td><td>RGB-D Images</td><td>-</td><td>✓</td><td>✗</td><td>3D Motion Planning, Task Video Generation</td><td>✗</td></tr><tr><td>LLMI3D [Yang et al., 2024]</td><td>Monocular Images</td><td>-</td><td>✓</td><td>✓</td><td>3D Grounding, 3D VQA</td><td>✗</td></tr><tr><td>Spatialvm [Chen et al., 2024a]</td><td>Monocular Images</td><td>-</td><td>✓</td><td>✓</td><td>Dense Reward Annotator, Spatial Data Generation</td><td>code</td></tr><tr><td>M3D-LaMed [Bai et al., 2024]</td><td>Medical Images</td><td>-</td><td>✓</td><td>✓</td><td>3D VQA, 3D VLP</td><td>code</td></tr><tr><td>HILT [Liu et al., 2024a]</td><td>Medical Images</td><td>-</td><td>✓</td><td>✓</td><td>3DHRG</td><td>✗</td></tr><tr><td>3D-CT-GPT [Chen et al., 2024b]</td><td>Medical Images</td><td>-</td><td>✓</td><td>✓</td><td>Radiology Report Generation, 3D VQA</td><td>✗</td></tr><tr><td>OpenMEDLab [Wang et al., 2024]</td><td>Medical Images</td><td>-</td><td>✓</td><td>✓</td><td>Medical Imaging</td><td>code</td></tr><tr><td rowspan=\"16\">Point Cloud - based</td><td>PointLLM [Xu et al., 2025]</td><td>Point Cloud</td><td>Direct Alignment</td><td>✓</td><td>✓</td><td>3D Object Classification, 3D Object Captioning</td><td>code</td></tr><tr><td>Chat-Scene [Huang et al., 2024]</td><td>Point Cloud</td><td>Direct Alignment</td><td>✓</td><td>✓</td><td>3D Visual Grounding, 3D Scene Captioning</td><td>code</td></tr><tr><td>PointCLIP [Zhang et al., 2022]</td><td>Point Cloud</td><td>Direct Alignment</td><td>✓</td><td>✓</td><td>3D Point Cloud Classification</td><td>code</td></tr><tr><td>PointCLIPv2 [Zhu et al., 2023]</td><td>Point Cloud</td><td>Direct Alignment</td><td>✓</td><td>✓</td><td>3D Point Cloud Classification</td><td>code</td></tr><tr><td>GPT4Point [Qi et al., 2024b]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Object Understanding</td><td>code</td></tr><tr><td>MiniGPT-3D [Tang et al., 2024a]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Object Classification, 3D Object Captioning</td><td>code</td></tr><tr><td>GreenPLM [Tang et al., 2024b]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Object Classification</td><td>code</td></tr><tr><td>Grounded 3D-LLM [Chen et al., 2024d]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Object Detection, 3D VQA</td><td>code</td></tr><tr><td>Lidar-LLM [Yang et al., 2023]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Captioning, 3D Grounding</td><td>code</td></tr><tr><td>3D-LLaVA [Deng et al., 2025]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D VQA, 3D 
Captioning</td><td>code</td></tr><tr><td>ScanReason [Zhu et al., 2024a]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D Reasoning Grounding</td><td>code</td></tr><tr><td>SegPoint [He et al., 2024]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D Instruction Segmentation</td><td>✗</td></tr><tr><td>Kestrel [Fei et al., 2024]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>Part-Aware Point Grounding</td><td>✗</td></tr><tr><td>SIG3D [Man et al., 2024]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>Situation Estimation</td><td>code</td></tr><tr><td>Chat-3D [Wang et al., 2023]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D VQA</td><td>code</td></tr><tr><td>LL3DA [Chen et al., 2024c]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D Dense Captioning</td><td>code</td></tr><tr><td rowspan=\"6\">Hybrid - based</td><td>Point-bind [Guo et al., 2023]</td><td>Point cloud, Image</td><td>Tightly Coupled</td><td>✓</td><td>✓</td><td>3D Cross-modal Retrieval, Any-to-3D Generation</td><td>code</td></tr><tr><td>JM3D [Ji et al., 2024]</td><td>Point cloud, Image</td><td>Tightly Coupled</td><td>✓</td><td>✓</td><td>Image-3D Retrieval, 3D Part Segmentation</td><td>code</td></tr><tr><td>Uni3D [Zhou et al., 2023]</td><td>Point cloud, Image</td><td>Tightly Coupled</td><td>✓</td><td>✓</td><td>Zero-shot Shape Classification</td><td>code</td></tr><tr><td>Uni3D-LLM [Liu et al., 2024b]</td><td>Point cloud, Image</td><td>Tightly Coupled</td><td>✓</td><td>✓</td><td>3D VQA</td><td>✗</td></tr><tr><td>MultiPLY [Hong et al., 2024]</td><td>Point cloud, Image</td><td>Loosely Coupled</td><td>✓</td><td>✓</td><td>Object retrieval</td><td>code</td></tr><tr><td>UniPoint-LLM [Liu et al.]</td><td>Point cloud, Image</td><td>Loosely Coupled</td><td>✓</td><td>✓</td><td>3D generation, 3D VQA</td><td>✗</td></tr></table>",
823
+ "bbox": [
824
+ 86,
825
+ 65,
826
+ 911,
827
+ 395
828
+ ],
829
+ "page_idx": 5
830
+ },
831
+ {
832
+ "type": "text",
833
+ "text": "Table 1: Taxonomy of Large Language Models with spatial reasoning capability. This table presents a comprehensive comparison of various 3D vision-language models categorized by their input modalities (image-based, point cloud-based, and hybrid-based), showing their data sources, alignment types, training strategies (pre-training and fine-tuning), primary tasks, and code availability. The models are organized into three main categories based on their input type: image-based models, point cloud-based models, and hybrid models that utilize both modalities.",
834
+ "bbox": [
835
+ 81,
836
+ 404,
837
+ 913,
838
+ 458
839
+ ],
840
+ "page_idx": 5
841
+ },
842
+ {
843
+ "type": "text",
844
+ "text": "for applications requiring detailed spatial understanding and precise control. However, they can be more complex to train and scale, and adding new modalities may require significant architectural changes. In contrast, loosely coupled approaches like MultiPLY and UniPoint-LLM provide greater modularity and flexibility, making them easier to extend and maintain. They allow independent optimization of different components and simplified training procedures, but may sacrifice some performance in tasks requiring fine-grained cross-modal understanding. The optimal choice ultimately depends on specific application requirements - tightly coupled architectures may be preferred for specialized high-performance systems, while loosely coupled designs better suit general-purpose platforms prioritizing extensibility and maintainability. Future work may explore hybrid approaches that combine the benefits of both paradigms, potentially using adaptive coupling mechanisms that adjust based on task demands.",
845
+ "bbox": [
846
+ 81,
847
+ 479,
848
+ 486,
849
+ 717
850
+ ],
851
+ "page_idx": 5
852
+ },
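To make this trade-off concrete, below is a minimal Python sketch of the two coupling styles, under stated assumptions: the projection sizes, module names, and text-only interface are illustrative stand-ins, not the architecture of MultiPLY, UniPoint-LLM, or any other surveyed system.

    import torch
    import torch.nn as nn

    class TightlyCoupled(nn.Module):
        """Shared embedding space: modalities fuse and train end-to-end."""
        def __init__(self, d=256):
            super().__init__()
            self.point_proj = nn.Linear(3, d)     # raw points -> shared space
            self.image_proj = nn.Linear(512, d)   # image features -> shared space
            self.fusion = nn.TransformerEncoderLayer(d_model=d, nhead=4, batch_first=True)

        def forward(self, points, image_feats):
            tokens = torch.cat([self.point_proj(points), self.image_proj(image_feats)], dim=1)
            return self.fusion(tokens)            # gradients shape all modalities jointly

    class LooselyCoupled:
        """Modular pipeline: independent experts exchange only text."""
        def __init__(self, point_expert, image_expert, llm):
            self.point_expert, self.image_expert, self.llm = point_expert, image_expert, llm

        def answer(self, points, image, question):
            facts = [self.point_expert(points), self.image_expert(image)]
            return self.llm("Context: " + " ".join(facts) + " Question: " + question)

    fused = TightlyCoupled()(torch.randn(2, 128, 3), torch.randn(2, 4, 512))
    print(fused.shape)   # torch.Size([2, 132, 256])

Swapping either expert in the loosely coupled version is trivial, but fine-grained geometry that is never verbalized is lost at the text interface, which is exactly the performance gap noted above.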
853
+ {
854
+ "type": "text",
855
+ "text": "4 Applications",
856
+ "text_level": 1,
857
+ "bbox": [
858
+ 83,
859
+ 734,
860
+ 223,
861
+ 752
862
+ ],
863
+ "page_idx": 5
864
+ },
865
+ {
866
+ "type": "text",
867
+ "text": "A key research focus leverages LLMs to enhance robotic embodied intelligence, enabling machines to interpret natural language commands for real-world tasks. This includes robotic control, navigation, and manipulation, where LLMs parse instructions, generate action plans, and adapt to dynamic environments—for instance, guiding robots to locate objects in cluttered spaces using text-based prompts.",
868
+ "bbox": [
869
+ 81,
870
+ 757,
871
+ 486,
872
+ 856
873
+ ],
874
+ "page_idx": 5
875
+ },
876
+ {
877
+ "type": "text",
878
+ "text": "3D Scene Understanding. Advanced 3D scene analysis integrates multimodal data (e.g., images, point clouds, text) for",
879
+ "bbox": [
880
+ 83,
881
+ 859,
882
+ 488,
883
+ 891
884
+ ],
885
+ "page_idx": 5
886
+ },
887
+ {
888
+ "type": "text",
889
+ "text": "tasks like open-vocabulary segmentation, semantic mapping, and spatial reasoning. Central to this is 3D visual question answering (3D-VQA), requiring models to interpret queries about object attributes, spatial relationships, or contextual roles within scenes. Context-aware systems further account for user perspectives to deliver precise responses.",
890
+ "bbox": [
891
+ 506,
892
+ 479,
893
+ 913,
894
+ 566
895
+ ],
896
+ "page_idx": 5
897
+ },
898
+ {
899
+ "type": "text",
900
+ "text": "Cross-Domain Applications. In healthcare, LLMs analyze volumetric medical scans (e.g., CT) for lesion detection and automated diagnostics. Autonomous driving systems utilize 3D-capable LLMs to interpret traffic scenes, aiding object detection [Zha et al., 2023, 2024] and path planning. Design-oriented applications include generating indoor layouts from textual requirements, while educational tools employ interactive 3D environments to teach spatial concepts.",
901
+ "bbox": [
902
+ 506,
903
+ 571,
904
+ 913,
905
+ 686
906
+ ],
907
+ "page_idx": 5
908
+ },
909
+ {
910
+ "type": "text",
911
+ "text": "5 Challenges and Future Directions",
912
+ "text_level": 1,
913
+ "bbox": [
914
+ 509,
915
+ 715,
916
+ 823,
917
+ 732
918
+ ],
919
+ "page_idx": 5
920
+ },
921
+ {
922
+ "type": "text",
923
+ "text": "Table 1 summarizes the models that leverage LLMs to assist graph-related tasks according to the proposed taxonomy. Based on the above review and analysis, we believe that there is still much space for further enhancement in this field. Recent advances in integrating LLMs with three-dimensional (3D) data have demonstrated considerable promise. However, numerous challenges must still be overcome to realize robust and practical 3D-aware LLMs. Below, we summarize these obstacles and then outline potential pathways to address them, highlighting key research directions for the future.",
924
+ "bbox": [
925
+ 506,
926
+ 750,
927
+ 913,
928
+ 890
929
+ ],
930
+ "page_idx": 5
931
+ },
932
+ {
933
+ "type": "text",
934
+ "text": "5.1 Challenges",
935
+ "text_level": 1,
936
+ "bbox": [
937
+ 84,
938
+ 68,
939
+ 210,
940
+ 83
941
+ ],
942
+ "page_idx": 6
943
+ },
944
+ {
945
+ "type": "text",
946
+ "text": "Weak Spatial Reasoning and Representation. Multimodal LLMs (MLLMs) exhibit limited acuity in 3D spatial understanding, struggling with fine-grained relationships (e.g., front/back distinctions, occluded object localization) and precise geometric outputs (distances, angles). These issues stem partly from mismatches between unstructured point clouds and sequence-based LLM architectures, where high-dimensional 3D data incur prohibitive token counts or oversimplified encodings.",
947
+ "bbox": [
948
+ 86,
949
+ 95,
950
+ 485,
951
+ 219
952
+ ],
953
+ "page_idx": 6
954
+ },
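A back-of-the-envelope calculation makes the token-count problem concrete; all numbers below are illustrative assumptions rather than measurements from any surveyed model.

    # Naive serialization: one token per point blows the context budget.
    N_POINTS = 100_000        # a modest indoor scan
    CONTEXT_WINDOW = 8_192    # a typical LLM context length
    print(N_POINTS / CONTEXT_WINDOW)        # ~12x over budget

    # Patch-style encoding: one token per local group of points.
    POINTS_PER_PATCH = 512
    print(N_POINTS // POINTS_PER_PATCH)     # 195 tokens, but coarser geometry

The two prints bracket the dilemma described above: token counts that overwhelm a sequence model on one side, oversimplified encodings on the other.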
955
+ {
956
+ "type": "text",
957
+ "text": "Data and Evaluation Gaps. Progress in 3D-aware LLMs is hindered by the scarcity of high-quality 3D-text paired datasets. Unlike the abundant resources for 2D images and video, the 3D domain lacks standardized, richly annotated datasets crucial for training robust models. Existing benchmarks focus mainly on discriminative tasks like classification and retrieval—emphasizing category differentiation rather than generating rich, descriptive 3D scene outputs. Consequently, evaluations often rely on subjective metrics (e.g., human or GPT-based judgments) that can lack consistency. Advancing the field requires developing objective, comprehensive benchmarks that assess both open-vocabulary generation and the spatial plausibility of descriptions relative to the underlying 3D structure.",
958
+ "bbox": [
959
+ 86,
960
+ 224,
961
+ 485,
962
+ 419
963
+ ],
964
+ "page_idx": 6
965
+ },
966
+ {
967
+ "type": "text",
968
+ "text": "Multimodal Integration and Generalization. Fusing 3D data (e.g., point clouds) with other modalities like 2D imagery, audio, or text poses significant challenges due to their distinct structural characteristics. The conversion and alignment of high-dimensional 3D data with lower-dimensional representations can lead to a loss of intricate details, diluting the original 3D richness. Moreover, current models often struggle with open-vocabulary recognition, limiting their ability to identify or describe objects outside of their training data—especially when encountering unseen scenes or novel objects. This difficulty is further compounded by the variability of natural language, from colloquial expressions to domain-specific terminology, and by noisy inputs. Thus, more sophisticated multimodal integration techniques and generalization strategies are needed to preserve geometric fidelity while accommodating diverse, unpredictable inputs.",
969
+ "bbox": [
970
+ 86,
971
+ 425,
972
+ 485,
973
+ 647
974
+ ],
975
+ "page_idx": 6
976
+ },
977
+ {
978
+ "type": "text",
979
+ "text": "Complex Task Definition. While 3D-aware LLMs excel in controlled settings, they lack frameworks for nuanced language-context inference in dynamic environments. Task decomposition and scalable encoding methods are needed to balance geometric fidelity with computational tractability, particularly for interactive applications requiring real-time spatial reasoning.",
980
+ "bbox": [
981
+ 86,
982
+ 652,
983
+ 485,
984
+ 750
985
+ ],
986
+ "page_idx": 6
987
+ },
988
+ {
989
+ "type": "text",
990
+ "text": "5.2 Future Directions",
991
+ "text_level": 1,
992
+ "bbox": [
993
+ 86,
994
+ 763,
995
+ 261,
996
+ 777
997
+ ],
998
+ "page_idx": 6
999
+ },
1000
+ {
1001
+ "type": "text",
1002
+ "text": "Enhancing 3D Perception and Representations. Addressing spatial reasoning gaps requires richer 3D-text datasets (e.g., from robotics, gaming, autonomous driving) and model architectures that encode geometric relationships. Multi-view data and robust depth cues can improve orientation, distance, and occlusion estimation. Compact 3D tokens and refined encoding/decoding methods may bridge unstructured point",
1003
+ "bbox": [
1004
+ 86,
1005
+ 791,
1006
+ 485,
1007
+ 888
1008
+ ],
1009
+ "page_idx": 6
1010
+ },
1011
+ {
1012
+ "type": "text",
1013
+ "text": "clouds with sequence-based models, enabling fine-grained spatial understanding and generation.",
1014
+ "bbox": [
1015
+ 513,
1016
+ 69,
1017
+ 911,
1018
+ 95
1019
+ ],
1020
+ "page_idx": 6
1021
+ },
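As one sketch of what such compact 3D tokens can look like, the recipe below (farthest point sampling, k-NN grouping, a linear patch embedding) mirrors the common point-patch encoder pattern; function names and hyperparameters are illustrative choices, not taken from any single surveyed paper.

    import torch

    def farthest_point_sample(xyz, m):
        """Greedy FPS: pick m well-spread centers from an (N, 3) cloud."""
        n = xyz.shape[0]
        centers = torch.zeros(m, dtype=torch.long)          # center 0 = point 0
        dist = torch.full((n,), float("inf"))
        for i in range(1, m):
            dist = torch.minimum(dist, ((xyz - xyz[centers[i - 1]]) ** 2).sum(-1))
            centers[i] = dist.argmax()
        return centers

    def tokenize_point_cloud(xyz, n_tokens=128, k=32, d=256):
        """One token per local patch, giving the LLM a short spatial sequence."""
        idx = farthest_point_sample(xyz, n_tokens)
        d2 = ((xyz[idx][:, None, :] - xyz[None, :, :]) ** 2).sum(-1)        # (m, N)
        knn = d2.topk(k, largest=False).indices                            # (m, k)
        patches = (xyz[knn] - xyz[idx][:, None, :]).reshape(n_tokens, -1)  # center-relative
        embed = torch.nn.Linear(3 * k, d)   # trained jointly in a real encoder
        return embed(patches)               # (n_tokens, d)

    print(tokenize_point_cloud(torch.randn(4096, 3)).shape)   # torch.Size([128, 256])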
1022
+ {
1023
+ "type": "text",
1024
+ "text": "Multi-Modal Fusion and Instruction Understanding. Tighter integration of modalities (point clouds, images, text, audio) via unified latent spaces or attention mechanisms could preserve subtle geometric and semantic details. Enhanced instruction processing—including hierarchical task decomposition, contextual interpretation, and robustness to dialects/terminology—would improve compositional reasoning in 3D environments and broaden real-world applicability. Furthermore, by leveraging these integrated representations, models can more adeptly adapt to complex instructions and novel scenarios, ultimately paving the way for more robust and versatile 3D reasoning systems.",
1025
+ "bbox": [
1026
+ 513,
1027
+ 101,
1028
+ 911,
1029
+ 268
1030
+ ],
1031
+ "page_idx": 6
1032
+ },
1033
+ {
1034
+ "type": "text",
1035
+ "text": "Cross-Scene Generalization and Robust Evaluation. Open-vocabulary 3D understanding demands large-scale pretraining on diverse scenes and transfer/lifelong learning paradigms for adapting to novel objects or environments. This understanding extends beyond predefined categories to generalize to unseen objects and scenes. For instance, models need to comprehend \"an old rocking chair\" even if this specific type of chair never appeared in the training data.",
1036
+ "bbox": [
1037
+ 513,
1038
+ 273,
1039
+ 911,
1040
+ 383
1041
+ ],
1042
+ "page_idx": 6
1043
+ },
1044
+ {
1045
+ "type": "text",
1046
+ "text": "Expanding Applications for Autonomous Systems. 3D-aware LLMs hold potential in robotics (navigation, manipulation), medical imaging (lesion detection), architectural design, and interactive education. Future systems may integrate environmental constraints, user perspectives, and object affordances for autonomous planning and decision-making in dynamic 3D contexts.",
1047
+ "bbox": [
1048
+ 513,
1049
+ 388,
1050
+ 911,
1051
+ 486
1052
+ ],
1053
+ "page_idx": 6
1054
+ },
1055
+ {
1056
+ "type": "text",
1057
+ "text": "Collectively, these challenges and potential directions underscore the field's rapid evolution and its equally significant open questions. Moving forward, more robust 3D-specific data resources, better model architectures, and more refined evaluation protocols will be essential to unlock the full potential of LLMs in three-dimensional settings—and ultimately bring intelligent, multimodal understanding closer to real-world deployment.",
1058
+ "bbox": [
1059
+ 513,
1060
+ 487,
1061
+ 911,
1062
+ 598
1063
+ ],
1064
+ "page_idx": 6
1065
+ },
1066
+ {
1067
+ "type": "text",
1068
+ "text": "6 Conclusion",
1069
+ "text_level": 1,
1070
+ "bbox": [
1071
+ 513,
1072
+ 616,
1073
+ 635,
1074
+ 631
1075
+ ],
1076
+ "page_idx": 6
1077
+ },
1078
+ {
1079
+ "type": "text",
1080
+ "text": "The integration of LLMs with 3D data is a dynamic research area. This survey categorized 3D-LLM research into image-based, point cloud-based, and hybrid modality-based spatial reasoning. It reviewed state-of-the-art methods, their applications in multiple fields, and associated challenges. Notably, image-based methods have data-related advantages but face issues like depth information shortage. Point cloud-based methods offer precise 3D details but encounter data-handling difficulties. Hybrid methods combine strengths yet struggle with data alignment. Applications are diverse, but challenges such as weak spatial perception, data scarcity, and evaluation problems exist. Future research should focus on enhancing 3D perception, improving multi-modal fusion, expanding generalization, developing evaluation metrics, enhancing instruction understanding, optimizing 3D representations, and exploring continuous learning. By addressing these, we can unlock the full potential of 3D-aware LLMs for real-world deployment and industry advancement.",
1081
+ "bbox": [
1082
+ 513,
1083
+ 638,
1084
+ 911,
1085
+ 888
1086
+ ],
1087
+ "page_idx": 6
1088
+ },
1089
+ {
1090
+ "type": "text",
1091
+ "text": "References",
1092
+ "text_level": 1,
1093
+ "bbox": [
1094
+ 84,
1095
+ 66,
1096
+ 179,
1097
+ 82
1098
+ ],
1099
+ "page_idx": 7
1100
+ },
1101
+ {
1102
+ "type": "list",
1103
+ "sub_type": "ref_text",
1104
+ "list_items": [
1105
+ "Fan Bai, Yuxin Du, Tiejun Huang, Max Q-H Meng, and Bo Zhao. M3d: Advancing 3d medical image analysis with multi-modal large language models. arXiv preprint arXiv:2404.00578, 2024.",
1106
+ "Boyuan Chen, Zhuo Xu, Sean Kirmani, Brain Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14455-14465, 2024.",
1107
+ "Hao Chen, Wei Zhao, Yingli Li, Tianyang Zhong, Yisong Wang, Youlan Shang, Lei Guo, Junwei Han, Tianming Liu, Jun Liu, et al. 3d-ct-gpt: Generating 3d radiology reports through integration of large vision-language models. arXiv preprint arXiv:2409.19330, 2024.",
1108
+ "Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. Ll3da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26428-26438, 2024.",
1109
+ "Yilun Chen, Shuai Yang, Haifeng Huang, Tai Wang, Ruiyuan Lyu, Runsen Xu, Dahua Lin, and Jiangmiao Pang. Grounded 3d-llm with referent tokens. arXiv preprint arXiv:2405.10370, 2024.",
1110
+ "Jiajun Deng, Tianyu He, Li Jiang, Tianyu Wang, Feras Dayoub, and Ian Reid. 3d-llava: Towards generalist 3d lmm's with omni superpoint transformer. arXiv preprint arXiv:2501.01163, 2025.",
1111
+ "Junjie Fei, Mahmoud Ahmed, Jian Ding, Eslam Mohamed Bakr, and Mohamed Elhoseiny. Kestrel: Point grounding multimodal llm for part-aware 3d vision-language understanding. arXiv preprint arXiv:2405.18937, 2024.",
1112
+ "Rao Fu, Jingyu Liu, Xilun Chen, Yixin Nie, and Wenhan Xiong. Scene-llm: Extending language model for 3d visual understanding and reasoning. arXiv preprint arXiv:2403.11401, 2024.",
1113
+ "Chen Gao, Baining Zhao, Weichen Zhang, Jinzhu Mao, Jun Zhang, Zhiheng Zheng, Fanhang Man, Jianjie Fang, Zile Zhou, Jinqiang Cui, et al. Embodiedcity: A benchmark platform for embodied agent in real-world city environment. arXiv preprint arXiv:2410.09604, 2024.",
1114
+ "Qiao Gu, Ali Kuwajerwala, Sacha Morin, Krishna Murthy Jatavallabhula, Bipasha Sen, Aditya Agarwal, Corban Rivera, William Paul, Kirsty Ellis, Rama Chellappa, et al. Conceptgraphs: Open-vocabulary 3d scene graphs for perception and planning. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 5021-5028. IEEE, 2024.",
1115
+ "Ziyu Guo, Renrui Zhang, Xiangyang Zhu, Yiwen Tang, Xi-anzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xi-anzhi Li, Hongsheng Li, et al. Point-bind & point-llm: Aligning point cloud with multi-modality for 3d understanding, generation, and instruction following. arXiv preprint arXiv:2309.00615, 2023."
1116
+ ],
1117
+ "bbox": [
1118
+ 84,
1119
+ 85,
1120
+ 486,
1121
+ 888
1122
+ ],
1123
+ "page_idx": 7
1124
+ },
1125
+ {
1126
+ "type": "list",
1127
+ "sub_type": "ref_text",
1128
+ "list_items": [
1129
+ "Shuting He, Henghui Ding, Xudong Jiang, and Bihan Wen. Segpoint: Segment any point cloud via large language model. In European Conference on Computer Vision, pages 349-367. Springer, 2024.",
1130
+ "Yining Hong, Haoyu Zhen, Peihao Chen, Shuhong Zheng, Yilun Du, Zhenfang Chen, and Chuang Gan. 3d-llm: Injecting the 3d world into large language models. Advances in Neural Information Processing Systems, 36:20482-20494, 2023.",
1131
+ "Yining Hong, Zishuo Zheng, Peihao Chen, Yian Wang, Junyan Li, and Chuang Gan. Multiply: A multisensory object-centric embodied large language model in 3d world. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26406-26416, 2024.",
1132
+ "Haifeng Huang, Yilun Chen, Zehan Wang, Rongjie Huang, Runsen Xu, Tai Wang, Luping Liu, Xize Cheng, Yang Zhao, Jiangmiao Pang, et al. Chat-scene: Bridging 3d scene and large language models with object identifiers. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024.",
1133
+ "Jiayi Ji, Haowei Wang, Changli Wu, Yiwei Ma, Xiaoshuai Sun, and Rongrong Ji. Jm3d & jm3d-llm: Elevating 3d representation with joint multi-modal cues. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024.",
1134
+ "Baoxiong Jia, Yixin Chen, Huangyue Yu, Yan Wang, Xuesong Niu, Tengyu Liu, Qing Li, and Siyuan Huang. Sceneverse: Scaling 3d vision-language learning for grounded scene understanding. In European Conference on Computer Vision, pages 289-310. Springer, 2024.",
1135
+ "Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2019.",
1136
+ "Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International conference on machine learning, pages 12888-12900. PMLR, 2022.",
1137
+ "Dingning Liu, Xiaoshui Huang, Zhihui Wang, Zhenfei Yin, Peng Gao, Yujiao Wu, Yuenan Hou, Xinzhu Ma, and Wanli Ouyang. Pointmllm: Aligning multi-modality with llm for point cloud understanding, generation and editing.",
1138
+ "Che Liu, Zhongwei Wan, Yuqi Wang, Hui Shen, Haozhe Wang, Kangyu Zheng, Mi Zhang, and Rossella Arcucci. Benchmarking and boosting radiology report generation for 3d high-resolution medical images. arXiv preprint arXiv:2406.07146, 2024.",
1139
+ "Dingning Liu, Xiaoshui Huang, Yuenan Hou, Zhihui Wang, Zhenfei Yin, Yongshun Gong, Peng Gao, and Wanli Ouyang. Uni3d-llm: Unifying point cloud perception, generation and editing with large language models. arXiv preprint arXiv:2402.03327, 2024.",
1140
+ "Chenyang Ma, Kai Lu, Ta-Ying Cheng, Niki Trigoni, and Andrew Markham. Spatialpin: Enhancing spatial reasoning"
1141
+ ],
1142
+ "bbox": [
1143
+ 511,
1144
+ 69,
1145
+ 911,
1146
+ 888
1147
+ ],
1148
+ "page_idx": 7
1149
+ },
1150
+ {
1151
+ "type": "list",
1152
+ "sub_type": "ref_text",
1153
+ "list_items": [
1154
+ "capabilities of vision-language models through prompting and interacting 3d priors. arXiv preprint arXiv:2403.13438, 2024.",
1155
+ "Xianzheng Ma, Yash Bhalgat, Brandon Smart, Shuai Chen, Xinghui Li, Jian Ding, Jindong Gu, Dave Zhenyu Chen, Songyou Peng, Jia-Wang Bian, et al. When llms step into the 3d world: A survey and meta-analysis of 3d tasks via multi-modal large language models. arXiv preprint arXiv:2405.10255, 2024.",
1156
+ "Yuexin Ma, Tai Wang, Xuyang Bai, Huitong Yang, Yuenan Hou, Yaming Wang, Yu Qiao, Ruigang Yang, and Xinge Zhu. Vision-centric bev perception: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024.",
1157
+ "Yunze Man, Liang-Yan Gui, and Yu-Xiong Wang. Situational awareness matters in 3d vision language reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13678-13688, 2024.",
1158
+ "Zekun Qi, Runpei Dong, Shaochen Zhang, Haoran Geng, Chunrui Han, Zheng Ge, Li Yi, and Kaiheng Ma. Shapellm: Universal 3d object understanding for embodied interaction. In European Conference on Computer Vision, pages 214-238. Springer, 2024.",
1159
+ "Zhangyang Qi, Ye Fang, Zeyi Sun, Xiaoyang Wu, Tong Wu, Jiaqi Wang, Dahua Lin, and Hengshuang Zhao. Gpt4point: A unified framework for point-language understanding and generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26417-26427, 2024.",
1160
+ "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021.",
1161
+ "Alec Radford. Improving language understanding by generative pre-training. 2018.",
1162
+ "Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Yixue Hao, Long Hu, and Min Chen. Minigpt-3d: Efficiently aligning 3d point clouds with large language models using 2d priors. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 6617-6626, 2024.",
1163
+ "Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Jinfeng Xu, Yixue Hao, Long Hu, and Min Chen. More text, less point: Towards 3d data-efficient point-language understanding. arXiv preprint arXiv:2408.15966, 2024.",
1164
+ "Zehan Wang, Haifeng Huang, Yang Zhao, Ziang Zhang, and Zhou Zhao. Chat-3d: Data-efficiently tuning large language model for universal dialogue of 3d scenes. arXiv preprint arXiv:2308.08769, 2023.",
1165
+ "Xiaosong Wang, Xiaofan Zhang, Guotai Wang, Junjun He, Zhongyu Li, Wentao Zhu, Yi Guo, Qi Dou, Xiaoxiao Li, Dequan Wang, et al. Openmedlab: An open-source platform for multi-modality foundation models in medicine. arXiv preprint arXiv:2402.18028, 2024."
1166
+ ],
1167
+ "bbox": [
1168
+ 84,
1169
+ 68,
1170
+ 488,
1171
+ 888
1172
+ ],
1173
+ "page_idx": 8
1174
+ },
1175
+ {
1176
+ "type": "list",
1177
+ "sub_type": "ref_text",
1178
+ "list_items": [
1179
+ "Runsen Xu, Xiaolong Wang, Tai Wang, Yilun Chen, Jiangmiao Pang, and Dahua Lin. Pointllm: Empowering large language models to understand point clouds. In European Conference on Computer Vision, pages 131-147. Springer, 2025.",
1180
+ "Senqiao Yang, Jiaming Liu, Ray Zhang, Mingjie Pan, Zoey Guo, Xiaqi Li, Zehui Chen, Peng Gao, Yandong Guo, and Shanghang Zhang. Lidar-llm: Exploring the potential of large language models for 3d lidar understanding. arXiv preprint arXiv:2312.14074, 2023.",
1181
+ "Fan Yang, Sicheng Zhao, Yanhao Zhang, Haoxiang Chen, Hui Chen, Wenbo Tang, Haonan Lu, Pengfei Xu, Zhenyu Yang, Jungong Han, et al. Llmi3d: Empowering llm with 3d perception from a single 2d image. arXiv preprint arXiv:2408.07422, 2024.",
1182
+ "Zhihao Yuan, Jinke Ren, Chun-Mei Feng, Hengshuang Zhao, Shuguang Cui, and Zhen Li. Visual programming for zero-shot open-vocabulary 3d visual grounding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20623-20633, 2024.",
1183
+ "Jirong Zha, Liang Han, Xiwang Dong, and Zhang Ren. Privacy-preserving push-sum distributed cubature information filter for nonlinear target tracking with switching directed topologies. ISA transactions, 136:16-30, 2023.",
1184
+ "Jirong Zha, Nan Zhou, Zhenyu Liu, Tao Sun, and Xinlei Chen. Diffusion-based filter for fast and accurate collaborative tracking with low data transmission. Authorea Preprints, 2024.",
1185
+ "Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8552-8562, 2022.",
1186
+ "Sha Zhang, Di Huang, Jiajun Deng, Shixiang Tang, Wanli Ouyang, Tong He, and Yanyong Zhang. Agent3d-zero: An agent for zero-shot 3d understanding. In European Conference on Computer Vision, pages 186-202. Springer, 2024.",
1187
+ "Junsheng Zhou, Jinsheng Wang, Baorui Ma, Yu-Shen Liu, Tiejun Huang, and Xinlong Wang. Uni3d: Exploring unified 3d representation at scale. arXiv preprint arXiv:2310.06773, 2023.",
1188
+ "Xiangyang Zhu, Renrui Zhang, Bowei He, Ziyu Guo, Ziyao Zeng, Zipeng Qin, Shanghang Zhang, and Peng Gao. Pointclip v2: Prompting clip and gpt for powerful 3d open-world learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2639-2650, 2023.",
1189
+ "Chenming Zhu, Tai Wang, Wenwei Zhang, Kai Chen, and Xihui Liu. Scanreason: Empowering 3d visual grounding with reasoning capabilities. In European Conference on Computer Vision, pages 151-168. Springer, 2024.",
1190
+ "Chenming Zhu, Tai Wang, Wenwei Zhang, Jiangmiao Pang, and Xihui Liu. Llava-3d: A simple yet effective pathway to empowering lmm with 3d-awareness. arXiv preprint arXiv:2409.18125, 2024."
1191
+ ],
1192
+ "bbox": [
1193
+ 511,
1194
+ 68,
1195
+ 913,
1196
+ 883
1197
+ ],
1198
+ "page_idx": 8
1199
+ }
1200
+ ]
data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_model.json ADDED
@@ -0,0 +1,1681 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.27,
8
+ 0.061,
9
+ 0.701
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2504.05786v1 [cs.CV] 8 Apr 2025"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.099,
18
+ 0.109,
19
+ 0.9,
20
+ 0.132
21
+ ],
22
+ "angle": 0,
23
+ "content": "How to Enable LLM with 3D Capacity? A Survey of Spatial Reasoning in LLM"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.194,
29
+ 0.148,
30
+ 0.8,
31
+ 0.168
32
+ ],
33
+ "angle": 0,
34
+ "content": "Jirong Zha\\(^{1*}\\), Yuxuan Fan\\(^{2*}\\), Xiao Yang\\(^{2}\\), Chen Gao\\(^{1\\dagger}\\), Xinlei Chen\\(^{1\\dagger}\\)"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.415,
40
+ 0.171,
41
+ 0.584,
42
+ 0.189
43
+ ],
44
+ "angle": 0,
45
+ "content": "\\(^{1}\\)Tsinghua University"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.221,
51
+ 0.19,
52
+ 0.778,
53
+ 0.208
54
+ ],
55
+ "angle": 0,
56
+ "content": "2The Hong Kong University of Science and Technology (Guang Zhou)"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.162,
62
+ 0.21,
63
+ 0.838,
64
+ 0.244
65
+ ],
66
+ "angle": 0,
67
+ "content": "zhajirong23@mails.tsinghua.edu.cn, {yfan546, xyang856}@connect.hkust-gz.edu.cn, chgao96@gmail.com, chen.xinlei@sz.tsinghua.edu.cn"
68
+ },
69
+ {
70
+ "type": "title",
71
+ "bbox": [
72
+ 0.246,
73
+ 0.272,
74
+ 0.325,
75
+ 0.289
76
+ ],
77
+ "angle": 0,
78
+ "content": "Abstract"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.115,
84
+ 0.299,
85
+ 0.457,
86
+ 0.633
87
+ ],
88
+ "angle": 0,
89
+ "content": "3D spatial understanding is essential in real-world applications such as robotics, autonomous vehicles, virtual reality, and medical imaging. Recently, Large Language Models (LLMs), having demonstrated remarkable success across various domains, have been leveraged to enhance 3D understanding tasks, showing potential to surpass traditional computer vision methods. In this survey, we present a comprehensive review of methods integrating LLMs with 3D spatial understanding. We propose a taxonomy that categorizes existing methods into three branches: image-based methods deriving 3D understanding from 2D visual data, point cloud-based methods working directly with 3D representations, and hybrid modality-based methods combining multiple data streams. We systematically review representative methods along these categories, covering data representations, architectural modifications, and training strategies that bridge textual and 3D modalities. Finally, we discuss current limitations, including dataset scarcity and computational challenges, while highlighting promising research directions in spatial perception, multi-modal fusion, and real-world applications."
90
+ },
91
+ {
92
+ "type": "title",
93
+ "bbox": [
94
+ 0.084,
95
+ 0.658,
96
+ 0.225,
97
+ 0.673
98
+ ],
99
+ "angle": 0,
100
+ "content": "1 Introduction"
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.083,
106
+ 0.681,
107
+ 0.49,
108
+ 0.89
109
+ ],
110
+ "angle": 0,
111
+ "content": "Large Language Models (LLMs) have evolved from basic neural networks to advanced transformer models like BERT [Kenton and Toutanova, 2019] and GPT [Radford, 2018], originally excelling at language tasks by learning from vast text datasets. Recent advancements, however, have extended these models beyond pure linguistic processing to encompass multimodal ability (In this paper, when we refer to LLMs, we specifically mean those that integrate multimodal functions). Their ability to capture complex patterns and relationships [Chen et al., 2024a] now holds promise for spatial reasoning tasks [Ma et al., 2024b]. By applying these enhanced models to challenges such as understanding 3D object relationships and spatial navigation, we open up new opportunities for advancing fields like robotics, computer vision, and augmented reality [Gao et al., 2024]."
112
+ },
113
+ {
114
+ "type": "image",
115
+ "bbox": [
116
+ 0.516,
117
+ 0.272,
118
+ 0.909,
119
+ 0.412
120
+ ],
121
+ "angle": 0,
122
+ "content": null
123
+ },
124
+ {
125
+ "type": "image_caption",
126
+ "bbox": [
127
+ 0.509,
128
+ 0.424,
129
+ 0.915,
130
+ 0.476
131
+ ],
132
+ "angle": 0,
133
+ "content": "Figure 1: Large Language Models can acquire 3D spatial reasoning capabilities through various input sources including multi-view images, RGB-D images, point clouds, and hybrid modalities, enabling the processing and understanding of three-dimensional information."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.508,
139
+ 0.506,
140
+ 0.914,
141
+ 0.659
142
+ ],
143
+ "angle": 0,
144
+ "content": "At the same time, 3D data and 3D modeling techniques have seen significant developments [Ma et al., 2024c], finding extensive applications in virtual and augmented reality, robotics, autonomous vehicles, gaming, medical imaging, and more. Unlike traditional two-dimensional images, 3D data provides a richer view of objects and environments, capturing essential spatial relationships and geometry. Such information is critical for tasks like scene reconstruction, object manipulation, and autonomous navigation, where merely text-based descriptions or 2D representations may fall short of conveying the necessary depth or spatial context."
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.508,
150
+ 0.667,
151
+ 0.916,
152
+ 0.892
153
+ ],
154
+ "angle": 0,
155
+ "content": "LLMs help Spatial Understanding. Bringing these two fields together—powerful language understanding from LLMs and the spatial realism of 3D data—offers the potential for highly capable, context-aware systems. From a linguistic perspective, real-world descriptions often reference physical arrangement, orientation, or manipulations of objects in space. Text alone can be imprecise or ambiguous about size, shape, or relative positioning unless one can integrate a robust spatial or visual understanding. Consequently, there is growing interest in enhancing LLMs with a \"3D capacity\" that enables them to interpret, reason, and even generate three-dimensional representations based on natural language prompts. Such an integrated approach opens up exciting prospects: robots that can follow language instructions more effectively by grounding their commands in 3D context, architects who quickly prototype 3D layouts from textual descriptions, game design"
156
+ }
157
+ ],
158
+ [
159
+ {
160
+ "type": "text",
161
+ "bbox": [
162
+ 0.083,
163
+ 0.069,
164
+ 0.488,
165
+ 0.111
166
+ ],
167
+ "angle": 0,
168
+ "content": "ers who generate immersive environments for narrative-based experiences, and many other creative applications yet to be envisioned."
169
+ },
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.082,
174
+ 0.114,
175
+ 0.487,
176
+ 0.337
177
+ ],
178
+ "angle": 0,
179
+ "content": "Motivation. Although LLMs have been increasingly applied in 3D-related tasks, and Ma et al. [2024b] provided a systematic overview of this field, the rapid advancement of this domain has led to numerous new developments in recent months, necessitating an up-to-date survey that captures these recent breakthroughs. Integrating 3D capacity into LLMs faces several key challenges: (1) the scarcity of high-quality 3D datasets compared to abundant text corpora; (2) the fundamental mismatch between sequential text data and continuous 3D spatial structures, requiring specialized architectural adaptations; and (3) the intensive computational requirements for processing 3D data at scale. While early attempts at combining language and 3D have shown promise, current approaches often remain limited in scope, scalability, and generalization capability. Most existing solutions are domain-specific and lack the broad applicability characteristic of text-based LLMs."
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.082,
185
+ 0.34,
186
+ 0.487,
187
+ 0.549
188
+ ],
189
+ "angle": 0,
190
+ "content": "Contribution. The contributions of this work are summarized in the following three aspects: (1) A structured taxonomy. We provide a timely and comprehensive survey that distinguishes itself from the systematic overview offered by Ma et al. [2024b] by presenting a novel perspective on LLM applications in 3D-related tasks: our work constructs a structured taxonomy that categorizes existing research into three primary groups (Figure 2) and offers a forward-looking analysis of the latest breakthroughs, thereby underscoring our unique contributions and the significance of our approach in advancing the field. (2) A comprehensive review. Building on the proposed taxonomy, we systematically review the current research progress on LLMs for spatial reasoning tasks. (3) Future directions. We highlight the remaining limitations of existing works and suggest potential directions for future research."
191
+ },
192
+ {
193
+ "type": "title",
194
+ "bbox": [
195
+ 0.084,
196
+ 0.562,
197
+ 0.22,
198
+ 0.579
199
+ ],
200
+ "angle": 0,
201
+ "content": "2 Preliminary"
202
+ },
203
+ {
204
+ "type": "title",
205
+ "bbox": [
206
+ 0.084,
207
+ 0.582,
208
+ 0.315,
209
+ 0.597
210
+ ],
211
+ "angle": 0,
212
+ "content": "2.1 Large Language Models"
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.082,
218
+ 0.599,
219
+ 0.487,
220
+ 0.71
221
+ ],
222
+ "angle": 0,
223
+ "content": "Large Language Models (LLMs) have evolved from early word embeddings to context-aware models like BERT [Kenton and Toutanova, 2019]. Generative transformers such as GPT series [Radford, 2018], have further advanced text generation and few-shot learning. However, these models often struggle with spatial reasoning due to their focus on textual patterns, prompting efforts to integrate external spatial knowledge [Fu et al., 2024]."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.082,
229
+ 0.71,
230
+ 0.487,
231
+ 0.837
232
+ ],
233
+ "angle": 0,
234
+ "content": "Vision-Language Models (VLMs) extend LLMs by aligning visual data with text. Early examples like CLIP [Radford et al., 2021] leverage co-attentional architectures and contrastive learning, while later models such as BLIP [Li et al., 2022] refine these techniques with larger datasets. Yet, most VLMs process only 2D data, limiting their ability to capture detailed 3D spatial configurations. Integrating 3D context via depth maps, point clouds, or voxels remains challenging, motivating ongoing research toward more robust spatial intelligence."
235
+ },
236
+ {
237
+ "type": "title",
238
+ "bbox": [
239
+ 0.084,
240
+ 0.844,
241
+ 0.277,
242
+ 0.858
243
+ ],
244
+ "angle": 0,
245
+ "content": "2.2 3D Data Structures"
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.084,
251
+ 0.861,
252
+ 0.488,
253
+ 0.89
254
+ ],
255
+ "angle": 0,
256
+ "content": "3D data has different structures, which are essential for understanding the three-dimensional world, and common methods"
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.508,
262
+ 0.069,
263
+ 0.915,
264
+ 0.125
265
+ ],
266
+ "angle": 0,
267
+ "content": "include point clouds, voxel grids, polygonal meshes, neural fields, hybrid representations, and 3D Gaussian splatting. Point clouds represent shapes using discrete points, typically denoted as"
268
+ },
269
+ {
270
+ "type": "equation",
271
+ "bbox": [
272
+ 0.606,
273
+ 0.13,
274
+ 0.816,
275
+ 0.149
276
+ ],
277
+ "angle": 0,
278
+ "content": "\\[\nP = \\left\\{p _ {i} \\in \\mathbb {R} ^ {3} \\mid i = 1, \\dots , N \\right\\},\n\\]"
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.509,
284
+ 0.153,
285
+ 0.913,
286
+ 0.293
287
+ ],
288
+ "angle": 0,
289
+ "content": "which are storage-efficient but lack surface topology. Voxel grids partition space into uniform cubes, with each voxel \\(V(i,j,k)\\) storing occupancy or distance values, providing detailed structure at the expense of increased memory usage at higher resolutions. Polygonal meshes compactly encode complex geometries through a set of vertices \\(\\{v_{i}\\}\\) and faces \\(\\{F_j\\}\\), though their unstructured and non-differentiable nature poses challenges for integration with neural networks. Neural fields offer an implicit approach by modeling 3D shapes as continuous and differentiable functions, such as"
290
+ },
291
+ {
292
+ "type": "equation",
293
+ "bbox": [
294
+ 0.651,
295
+ 0.298,
296
+ 0.771,
297
+ 0.315
298
+ ],
299
+ "angle": 0,
300
+ "content": "\\[\nf _ {\\theta}: \\mathbb {R} ^ {3} \\rightarrow (c, \\sigma),\n\\]"
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.508,
306
+ 0.32,
307
+ 0.915,
308
+ 0.46
309
+ ],
310
+ "angle": 0,
311
+ "content": "which maps spatial coordinates to color \\(c\\) and density \\(\\sigma\\). Hybrid representations combine these neural fields with traditional volumetric methods (e.g., integrating \\(f_{\\theta}\\) with voxel grids) to achieve high-quality, real-time rendering. Meanwhile, 3D Gaussian splatting enhances point clouds by associating each point \\(p_i\\) with a covariance matrix \\(\\Sigma_i\\) and color \\(c_i\\), efficiently encoding radiance information for rendering. Each method has its unique strengths and trade-offs, making them suitable for different applications in 3D understanding and generation."
312
+ },
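A minimal sketch, assuming PyTorch, of three of the representations just defined: the point set P = {p_i}, an occupancy voxel grid V(i, j, k), and a tiny neural field f_theta: R^3 -> (c, sigma); the resolution and layer widths are arbitrary illustrative choices.

    import torch

    points = torch.rand(2048, 3)                      # P = {p_i in R^3}, unordered

    R = 32                                            # voxel resolution; memory grows as R^3
    ijk = (points * R).long().clamp(max=R - 1)
    voxels = torch.zeros(R, R, R, dtype=torch.bool)   # V(i, j, k) stores occupancy
    voxels[ijk[:, 0], ijk[:, 1], ijk[:, 2]] = True

    f_theta = torch.nn.Sequential(                    # continuous, differentiable field
        torch.nn.Linear(3, 64), torch.nn.ReLU(), torch.nn.Linear(64, 4)
    )
    c, sigma = f_theta(points).split([3, 1], dim=-1)  # color and density per query
    print(voxels.sum().item(), c.shape, sigma.shape)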
313
+ {
314
+ "type": "title",
315
+ "bbox": [
316
+ 0.509,
317
+ 0.469,
318
+ 0.707,
319
+ 0.485
320
+ ],
321
+ "angle": 0,
322
+ "content": "2.3 Proposed taxonomy"
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.508,
328
+ 0.488,
329
+ 0.915,
330
+ 0.848
331
+ ],
332
+ "angle": 0,
333
+ "content": "We propose a taxonomy that classifies 3D-LLM research into three main categories based on input modalities and integration strategies, as shown in Figure 1: Image-based spatial reasoning encompasses approaches that derive 3D understanding from 2D images. This includes multi-view methods that reconstruct 3D scenes, RGB-D images providing explicit depth information, monocular 3D perception inferring depth from single views, and medical imaging applications. While these approaches benefit from readily available image data and existing vision models, they may struggle with occlusions and viewpoint limitations. Point cloud-based spatial reasoning works directly with 3D point cloud data through three alignment strategies: (1) Direct alignment that immediately connects point features with language embeddings, (2) Step-by-step alignment that follows sequential stages to bridge modalities, and (3) Task-specific alignment customized for particular spatial reasoning requirements. These methods maintain geometric fidelity but face challenges in handling unstructured 3D data. Hybrid modality-based spatial reasoning combines multiple data streams through either tightly or loosely coupled architectures. Tightly coupled approaches integrate modalities through shared embeddings or end-to-end training, while loosely coupled methods maintain modular components with defined interfaces between them. This enables leveraging complementary strengths across modalities but increases architectural complexity."
334
+ },
335
+ {
336
+ "type": "text",
337
+ "bbox": [
338
+ 0.508,
339
+ 0.847,
340
+ 0.914,
341
+ 0.89
342
+ ],
343
+ "angle": 0,
344
+ "content": "This taxonomy provides a structured framework for understanding the diverse technical approaches in the field while highlighting the distinct challenges and trade-offs each branch"
345
+ }
346
+ ],
347
+ [
348
+ {
349
+ "type": "image",
350
+ "bbox": [
351
+ 0.094,
352
+ 0.066,
353
+ 0.904,
354
+ 0.322
355
+ ],
356
+ "angle": 0,
357
+ "content": null
358
+ },
359
+ {
360
+ "type": "image_caption",
361
+ "bbox": [
362
+ 0.082,
363
+ 0.33,
364
+ 0.913,
365
+ 0.357
366
+ ],
367
+ "angle": 0,
368
+ "content": "Figure 2: A Taxonomy of Models for Spatial Reasoning with LLMs: Image-based, Point Cloud-based, and Hybrid Modality-based Approaches and Their Subdivisions."
369
+ },
370
+ {
371
+ "type": "image",
372
+ "bbox": [
373
+ 0.088,
374
+ 0.379,
375
+ 0.487,
376
+ 0.58
377
+ ],
378
+ "angle": 0,
379
+ "content": null
380
+ },
381
+ {
382
+ "type": "image_caption",
383
+ "bbox": [
384
+ 0.132,
385
+ 0.592,
386
+ 0.438,
387
+ 0.607
388
+ ],
389
+ "angle": 0,
390
+ "content": "Figure 3: An overview of image-based approaches."
391
+ },
392
+ {
393
+ "type": "text",
394
+ "bbox": [
395
+ 0.083,
396
+ 0.629,
397
+ 0.489,
398
+ 0.658
399
+ ],
400
+ "angle": 0,
401
+ "content": "must address. Figure 2 presents a detailed breakdown of representative works in each category."
402
+ },
403
+ {
404
+ "type": "title",
405
+ "bbox": [
406
+ 0.084,
407
+ 0.674,
408
+ 0.46,
409
+ 0.707
410
+ ],
411
+ "angle": 0,
412
+ "content": "3 Recent Advances of Spatial Reasoning in LLM"
413
+ },
414
+ {
415
+ "type": "title",
416
+ "bbox": [
417
+ 0.084,
418
+ 0.715,
419
+ 0.37,
420
+ 0.733
421
+ ],
422
+ "angle": 0,
423
+ "content": "3.1 Image-based Spatial Reasoning"
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.082,
429
+ 0.736,
430
+ 0.489,
431
+ 0.89
432
+ ],
433
+ "angle": 0,
434
+ "content": "Image-based spatial reasoning methods can be categorized based on their input modalities: multi-view images, monocular images, RGB-D images, and 3D medical images shown in Figure 3. Each modality offers unique advantages for enhancing 3D understanding in Large Language Models (LLMs). Multi-view images provide spatial data from different perspectives, monocular images extract 3D insights from a single view, RGB-D images incorporate depth information, and 3D medical images address domain-specific challenges in healthcare. These categories highlight the strengths and challenges of each approach in improving spatial reasoning capabilities."
435
+ },
436
+ {
437
+ "type": "title",
438
+ "bbox": [
439
+ 0.509,
440
+ 0.381,
441
+ 0.755,
442
+ 0.396
443
+ ],
444
+ "angle": 0,
445
+ "content": "3.1.1 Multi-view Images as input"
446
+ },
447
+ {
448
+ "type": "text",
449
+ "bbox": [
450
+ 0.508,
451
+ 0.397,
452
+ 0.916,
453
+ 0.827
454
+ ],
455
+ "angle": 0,
456
+ "content": "Several studies explore multi-view images to enhance LLMs' spatial understanding. LLaVA-3D Zhu et al. [2024b] leverages multi-view images and 3D positional embeddings to create 3D Patches, achieving state-of-the-art 3D spatial understanding while maintaining 2D image understanding capabilities. Agent3D-Zero Zhang et al. [2024] utilizes multiple images from different viewpoints, enabling VLMs to perform robust reasoning and understand spatial relationships, achieving zero-shot scene understanding. ShapeLLM Qi et al. [2024a] also uses multi-view image input, with robustness to occlusions. Scene-LLM Fu et al. [2024] uses multi-view images to build 3D feature representations, incorporating scene-level and egocentric 3D information to support interactive planning. SpatialPIN Ma et al. [2024a] enhances VLM's spatial reasoning by decomposing, understanding and reconstructing explicit 3D representations from multi-view images and generalizes to various 3D tasks. LLMI3D Yang et al. [2024] extracts spatially enhanced local features from high-resolution images using CNNs and a depth predictor and uses ViT to obtain tokens from low-resolution images. It employs a spatially enhanced cross-branch attention mechanism to effectively mine spatial local features of objects and uses geometric projection to handle. Extracting multi-view features results in huge computational overhead and ignores the essential geometry and depth information. Additionally, plain texts often lead to ambiguities especially in cluttered and complex 3D environmentsChen et al. [2024c]. ConceptGraphs Gu et al. [2024] proposes a graph-structured representation for 3D scenes that operates with an open vocabulary, which is developed by utilizing 2D foundation models and integrating their outputs into a 3D format through multiview association."
457
+ },
458
+ {
459
+ "type": "title",
460
+ "bbox": [
461
+ 0.509,
462
+ 0.832,
463
+ 0.749,
464
+ 0.847
465
+ ],
466
+ "angle": 0,
467
+ "content": "3.1.2 Monocular Image as input"
468
+ },
469
+ {
470
+ "type": "text",
471
+ "bbox": [
472
+ 0.508,
473
+ 0.847,
474
+ 0.915,
475
+ 0.89
476
+ ],
477
+ "angle": 0,
478
+ "content": "LLMI3D Yang et al. [2024] uses a single 2D image for 3D perception, enhancing performance through spatial local feature mining, 3D query token decoding, and geometry-based"
479
+ }
480
+ ],
481
+ [
482
+ {
483
+ "type": "text",
484
+ "bbox": [
485
+ 0.083,
486
+ 0.069,
487
+ 0.488,
488
+ 0.14
489
+ ],
490
+ "angle": 0,
491
+ "content": "3D reasoning. It uses a depth predictor and CNN to extract spatial local features and uses learnable 3D query tokens for geometric coordinate regression. It combines black-box networks and white-box projection to address changes in camera focal lengths."
492
+ },
493
+ {
494
+ "type": "title",
495
+ "bbox": [
496
+ 0.084,
497
+ 0.146,
498
+ 0.3,
499
+ 0.161
500
+ ],
501
+ "angle": 0,
502
+ "content": "3.1.3 RGB-D Image as Input"
503
+ },
504
+ {
505
+ "type": "text",
506
+ "bbox": [
507
+ 0.082,
508
+ 0.162,
509
+ 0.488,
510
+ 0.258
511
+ ],
512
+ "angle": 0,
513
+ "content": "Depth is estimated in SpatialPIN Ma et al. [2024a] by ZoeDepth when finding field of view (FOV) through perspective fields and provided for 3D-scene understanding and reconstruction. M3D-LaMed Bai et al. [2024] pre-trains the 3D medical vision encoder with medical image slices along depth and introduces end-to-end tuning to integrate 3D information into LLM."
514
+ },
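The depth-to-3D step these RGB-D pipelines build on is standard pinhole unprojection; the sketch below is a generic version with assumed intrinsics, not code from SpatialPIN or M3D-LaMed.

    import numpy as np

    def unproject_depth(depth, fx, fy, cx, cy):
        """Lift an (H, W) depth map to an (M, 3) point cloud via pinhole intrinsics."""
        h, w = depth.shape
        u, v = np.meshgrid(np.arange(w), np.arange(h))
        x = (u - cx) * depth / fx          # X = (u - cx) * Z / fx
        y = (v - cy) * depth / fy
        pts = np.stack([x, y, depth], axis=-1).reshape(-1, 3)
        return pts[pts[:, 2] > 0]          # drop invalid zero-depth pixels

    cloud = unproject_depth(np.random.rand(480, 640) * 5.0, fx=525, fy=525, cx=320, cy=240)
    print(cloud.shape)                     # (307200, 3) when every pixel is valid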
515
+ {
516
+ "type": "title",
517
+ "bbox": [
518
+ 0.084,
519
+ 0.266,
520
+ 0.326,
521
+ 0.281
522
+ ],
523
+ "angle": 0,
524
+ "content": "3.1.4 3D Medical Image as input"
525
+ },
526
+ {
527
+ "type": "text",
528
+ "bbox": [
529
+ 0.082,
530
+ 0.281,
531
+ 0.489,
532
+ 0.573
533
+ ],
534
+ "angle": 0,
535
+ "content": "Unlike previous research focused on 2D medical images, integrating multi-modal other information such as textual descriptions, M3D-LaMed Bai et al. [2024] is specifically designed for 3D CT images by analyzing spatial features. It demonstrates excellent performance across multiple tasks, including image-text retrieval, report generation, visual question answering, localization, and segmentation. In order to generate radiology reports automatically, a brand-new framework Liu et al. [2024a] is proposed to employs low-resolution (LR) visual tokens as queries to extract information from high-resolution (HR) tokens, ensuring that detailed information is retained across HR volumes while minimizing computational costs by processing only the HR-informed LR visual queries. 3D-CT-GPT Chen et al. [2024b], based medical visual language model, is tailored for the generation of radiology reports from 3D CT scans, with a focus on chest CTs. OpenMEDLab Wang et al. [2024] comprises and publishes a variety of medical foundation models to process multi-modal medical data including Color Fundus Photography (CFP), Optical Coherence Tomography (OCT), endoscopy videos, CT&MR volumes and other pathology images."
536
+ },
537
+ {
538
+ "type": "title",
539
+ "bbox": [
540
+ 0.084,
541
+ 0.58,
542
+ 0.212,
543
+ 0.593
544
+ ],
545
+ "angle": 0,
546
+ "content": "3.1.5 Discussion"
547
+ },
548
+ {
549
+ "type": "text",
550
+ "bbox": [
551
+ 0.082,
552
+ 0.595,
553
+ 0.488,
554
+ 0.747
555
+ ],
556
+ "angle": 0,
557
+ "content": "Image-based spatial reasoning methods offer significant advantages, such as easy data acquisition and integration with pre-trained 2D models. Multi-view images provide rich spatial information, while depth estimation enhances scene understanding. However, challenges remain, including limited depth from single views, scale uncertainty, occlusion, and viewpoint dependency. These methods also face issues with visual hallucinations, generalization to novel scenes, and high computational costs. Future research should focus on improving multi-view integration and depth estimation to address these limitations."
558
+ },
559
+ {
560
+ "type": "title",
561
+ "bbox": [
562
+ 0.084,
563
+ 0.758,
564
+ 0.483,
565
+ 0.789
566
+ ],
567
+ "angle": 0,
568
+ "content": "3.2 Recent Advances of Point Cloud-based Spatial Reasoning"
569
+ },
570
+ {
571
+ "type": "text",
572
+ "bbox": [
573
+ 0.082,
574
+ 0.791,
575
+ 0.488,
576
+ 0.89
577
+ ],
578
+ "angle": 0,
579
+ "content": "As shown in Figure 4, point cloud-based spatial reasoning has advanced significantly in recent years, employing three main alignment methods: Direct, Step-by-step, and Task-specific Alignment. These methods are essential for integrating point cloud data with language models to enable effective spatial reasoning. Direct Alignment establishes immediate connections between point cloud features and language model em"
580
+ },
581
+ {
582
+ "type": "image",
583
+ "bbox": [
584
+ 0.518,
585
+ 0.068,
586
+ 0.906,
587
+ 0.245
588
+ ],
589
+ "angle": 0,
590
+ "content": null
591
+ },
592
+ {
593
+ "type": "image_caption",
594
+ "bbox": [
595
+ 0.543,
596
+ 0.257,
597
+ 0.878,
598
+ 0.271
599
+ ],
600
+ "angle": 0,
601
+ "content": "Figure 4: An overview of point cloud-based approaches."
602
+ },
603
+ {
604
+ "type": "text",
605
+ "bbox": [
606
+ 0.508,
607
+ 0.291,
608
+ 0.913,
609
+ 0.361
610
+ ],
611
+ "angle": 0,
612
+ "content": "beddings, while Step-by-step Alignment follows a sequential process through multiple stages. Task-specific Alignment is customized for particular spatial reasoning requirements. The choice of method depends on specific application needs and constraints."
613
+ },
614
+ {
615
+ "type": "title",
616
+ "bbox": [
617
+ 0.509,
618
+ 0.368,
619
+ 0.686,
620
+ 0.382
621
+ ],
622
+ "angle": 0,
623
+ "content": "3.2.1 Direct Alignment"
624
+ },
625
+ {
626
+ "type": "text",
627
+ "bbox": [
628
+ 0.508,
629
+ 0.383,
630
+ 0.915,
631
+ 0.688
632
+ ],
633
+ "angle": 0,
634
+ "content": "Direct alignment methods create direct connections between point cloud data and language models. PointCLIP [Zhang et al., 2022] was a pioneer, projecting point clouds into multiview depth maps and using CLIP's pre-trained visual encoder for feature extraction, which was then aligned with textual features through a hand-crafted template. This approach showed promising results in zero-shot and few-shot classification tasks by transferring 2D knowledge to the 3D domain. PointCLIP V2 [Zhu et al., 2023] improved the projection quality with a realistic projection module and used GPT-3 for generating 3D-specific text descriptions, achieving better performance in zero-shot classification, part segmentation, and object detection. Chat-Scene [Huang et al., 2024] introduced object identifiers to facilitate object referencing during user-assistant interactions, representing scenes through object-centric embeddings. PointLLM [Xu et al., 2025] advanced the field by integrating a point cloud encoder with a powerful LLM, effectively fusing geometric, appearance, and linguistic information, and overcoming data scarcity with automated generation. These methods demonstrate the potential for effective 3D point cloud understanding through language models, enabling improved spatial reasoning and human-AI interaction."
635
+ },
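A deliberately simplified sketch of the projection idea behind PointCLIP-style direct alignment: splat the cloud into axis-aligned depth images that a frozen 2D encoder such as CLIP can consume. PointCLIP's actual projection (and V2's realistic projection module) is more sophisticated than this orthographic stand-in.

    import numpy as np

    def project_to_depth_map(points, res=64, axis=2):
        """Splat an (N, 3) cloud to one depth image; closest point along `axis` wins."""
        lo = points.min(0)
        span = (points.max(0) - lo).max() + 1e-8
        pts = (points - lo) / span                     # normalize into the unit cube
        uv_axes = [a for a in range(3) if a != axis]
        uv = (pts[:, uv_axes] * (res - 1)).astype(int)
        depth = np.full((res, res), np.inf)
        for (u, v), z in zip(uv, pts[:, axis]):
            depth[v, u] = min(depth[v, u], z)          # keep the nearest surface
        depth[np.isinf(depth)] = 0.0                   # background
        return depth

    views = [project_to_depth_map(np.random.randn(2048, 3), axis=a) for a in range(3)]
    print([v.shape for v in views])                    # three pseudo multi-view images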
636
+ {
637
+ "type": "title",
638
+ "bbox": [
639
+ 0.509,
640
+ 0.694,
641
+ 0.729,
642
+ 0.708
643
+ ],
644
+ "angle": 0,
645
+ "content": "3.2.2 Step-by-step Alignment"
646
+ },
647
+ {
648
+ "type": "text",
649
+ "bbox": [
650
+ 0.508,
651
+ 0.709,
652
+ 0.914,
653
+ 0.89
654
+ ],
655
+ "angle": 0,
656
+ "content": "Step-by-step alignment has gained popularity in integrating point cloud features with language models. Notable approaches include GPT4Point [Qi et al., 2024b], which uses a Bert-based Point-QFormer for point-text feature alignment, followed by object generation. Grounded 3D-LLMs [Chen et al., 2024d] first aligns 3D scene embeddings with textual descriptions via contrastive pre-training, then fine-tunes with referent tokens. LiDAR-LLMs [Yang et al., 2023] employ a three-stage process: cross-modal alignment, object-centric learning, and high-level instruction fine-tuning. MiniGPT-3D [Tang et al., 2024a] follows a four-stage strategy, from point cloud projection to advanced model enhancements using Mixture of Query Experts. GreenPLM [Tang et al., 2024b] uses"
657
+ }
658
+ ],
659
+ [
660
+ {
661
+ "type": "image",
662
+ "bbox": [
663
+ 0.088,
664
+ 0.068,
665
+ 0.482,
666
+ 0.201
667
+ ],
668
+ "angle": 0,
669
+ "content": null
670
+ },
671
+ {
672
+ "type": "image_caption",
673
+ "bbox": [
674
+ 0.103,
675
+ 0.212,
676
+ 0.466,
677
+ 0.226
678
+ ],
679
+ "angle": 0,
680
+ "content": "Figure 5: An overview of hybrid modality-based approaches."
681
+ },
682
+ {
683
+ "type": "text",
684
+ "bbox": [
685
+ 0.082,
686
+ 0.245,
687
+ 0.489,
688
+ 0.316
689
+ ],
690
+ "angle": 0,
691
+ "content": "a three-stage method that aligns a text encoder with an LLM using large text data, followed by point-LLM alignment with 3D data. These step-by-step approaches highlight the gradual improvement of spatial reasoning in 3D contexts, offering valuable insights for future research."
692
+ },
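Mechanically, these step-by-step recipes share one core move: freeze everything, unfreeze only the current stage's modules, and train. The sketch below illustrates that pattern with toy stand-in modules; it is not the training code of GPT4Point, MiniGPT-3D, or GreenPLM, and real systems often swap the full-LLM stage for LoRA updates.

    import torch

    def run_stage(trainable, all_modules):
        """Freeze all modules, then unfreeze and optimize this stage's subset."""
        for m in all_modules:
            for p in m.parameters():
                p.requires_grad = False
        params = [p for m in trainable for p in m.parameters()]
        for p in params:
            p.requires_grad = True
        opt = torch.optim.AdamW(params, lr=1e-4)
        # ... run this stage's forward/backward loop with `opt` here ...

    point_encoder = torch.nn.Linear(3, 256)     # toy stand-ins for real components
    projector = torch.nn.Linear(256, 4096)
    llm = torch.nn.Linear(4096, 4096)

    modules = [point_encoder, projector, llm]
    run_stage([point_encoder, projector], modules)  # stage 1: point-text alignment
    run_stage([projector, llm], modules)            # stage 2: instruction tuning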
693
+ {
694
+ "type": "title",
695
+ "bbox": [
696
+ 0.084,
697
+ 0.321,
698
+ 0.305,
699
+ 0.335
700
+ ],
701
+ "angle": 0,
702
+ "content": "3.2.3 Task-specific Alignment"
703
+ },
704
+ {
705
+ "type": "text",
706
+ "bbox": [
707
+ 0.082,
708
+ 0.335,
709
+ 0.489,
710
+ 0.584
711
+ ],
712
+ "angle": 0,
713
+ "content": "Task-specific alignment customizes models for specific spatial reasoning tasks to improve performance and generalization. SceneVerse [Jia et al., 2024] introduces a large 3D vision-language dataset and Grounded Pre-training for Scenes (GPS), using multi-level contrastive alignment for unified scene-text alignment, achieving state-of-the-art results in tasks like 3D visual grounding and question answering. LL3DA [Chen et al., 2024c] presents a dialogue system that integrates textual instructions and visual interactions, excelling in complex 3D environments. Chat-3D [Wang et al., 2023] proposes a three-stage training scheme to align 3D scene representations with language models, capturing spatial relations with limited data. VisProg [Yuan et al., 2024] introduces visual programming for zero-shot open-vocabulary 3D grounding, leveraging LLMs to generate and execute programmatic representations. These task-specific approaches highlight the importance of adapting models to complex spatial relationships, enabling robust performance even with limited data or zero-shot tasks."
714
+ },
715
+ {
716
+ "type": "title",
717
+ "bbox": [
718
+ 0.084,
719
+ 0.591,
720
+ 0.212,
721
+ 0.604
722
+ ],
723
+ "angle": 0,
724
+ "content": "3.2.4 Discussion"
725
+ },
726
+ {
727
+ "type": "text",
728
+ "bbox": [
729
+ 0.082,
730
+ 0.605,
731
+ 0.489,
732
+ 0.705
733
+ ],
734
+ "angle": 0,
735
+ "content": "The three alignment approaches—Direct, Step-by-step, and Task-specific—each have distinct strengths and challenges. Direct alignment offers efficiency and quick results but struggles with complex spatial relationships. Step-by-step alignment improves feature integration at the cost of higher computational resources and training time. Task-specific alignment excels in specialized tasks but may lack broader applicability."
736
+ },
737
+ {
738
+ "type": "title",
739
+ "bbox": [
740
+ 0.084,
741
+ 0.711,
742
+ 0.45,
743
+ 0.727
744
+ ],
745
+ "angle": 0,
746
+ "content": "3.3 Hybrid Modality-based Spatial Reasoning"
747
+ },
748
+ {
749
+ "type": "text",
750
+ "bbox": [
751
+ 0.082,
752
+ 0.729,
753
+ 0.487,
754
+ 0.84
755
+ ],
756
+ "angle": 0,
757
+ "content": "Hybrid modality-based spatial reasoning integrates point clouds, images, and LLMs through Tightly Coupled and Loosely Coupled approaches, as shown in Figure 5. The Tightly Coupled approach fosters close integration, enabling seamless interaction and high performance, while the Loosely Coupled approach promotes modularity, allowing independent operation of components for greater scalability and flexibility at the cost of reduced real-time interaction."
758
+ },
759
+ {
760
+ "type": "title",
761
+ "bbox": [
762
+ 0.084,
763
+ 0.847,
764
+ 0.252,
765
+ 0.861
766
+ ],
767
+ "angle": 0,
768
+ "content": "3.3.1 Tightly Coupled"
769
+ },
770
+ {
771
+ "type": "text",
772
+ "bbox": [
773
+ 0.082,
774
+ 0.861,
775
+ 0.489,
776
+ 0.89
777
+ ],
778
+ "angle": 0,
779
+ "content": "Several recent works have explored tightly integrated approaches for spatial reasoning across point clouds, images and"
780
+ },
781
+ {
782
+ "type": "text",
783
+ "bbox": [
784
+ 0.508,
785
+ 0.069,
786
+ 0.916,
787
+ 0.431
788
+ ],
789
+ "angle": 0,
790
+ "content": "language modalities: Point-Bind [Guo et al., 2023] proposes a joint embedding space to align point clouds with images and text through contrastive learning. It leverages ImageBind to construct unified representations that enable tasks like zero-shot classification, open-world understanding and multi-modal generation. The tight coupling allows Point-Bind to reason about point clouds using both visual and linguistic cues. JM3D [Ji et al., 2024] introduces a Structured Multimodal Organizer that tightly fuses multi-view images and hierarchical text trees with point clouds. This coupled architecture enables detailed spatial understanding by leveraging complementary information across modalities. The Joint Multi-modal Alignment further enhances the synergistic relationships between visual and linguistic features. Uni3D [Zhou et al., 2023] employs a unified transformer architecture that directly aligns point cloud features with image-text representations. By tightly coupling the modalities through end-to-end training, it achieves strong performance on tasks like zero-shot classification and open-world understanding. The shared backbone enables efficient scaling to billion-parameter models. Uni3D-LLM [Liu et al., 2024b] extends this tight coupling to LLMs through an LLM-to-Generator mapping block. This enables unified perception, generation and editing of point clouds guided by natural language. The tight integration allows leveraging rich semantic knowledge from LLMs while maintaining high-quality 3D understanding."
791
+ },
792
+ {
793
+ "type": "title",
794
+ "bbox": [
795
+ 0.51,
796
+ 0.436,
797
+ 0.682,
798
+ 0.451
799
+ ],
800
+ "angle": 0,
801
+ "content": "3.3.2 Loosely Coupled"
802
+ },
803
+ {
804
+ "type": "text",
805
+ "bbox": [
806
+ 0.508,
807
+ 0.452,
808
+ 0.915,
809
+ 0.785
810
+ ],
811
+ "angle": 0,
812
+ "content": "Loosely coupled approaches maintain greater independence between different modalities while still enabling interaction through well-defined interfaces. MultiPLY [Hong et al., 2024] proposes a multisensory embodied LLM that handles multiple input modalities (visual, audio, tactile, thermal) through separate encoders. The modalities are processed independently and communicate through action tokens and state tokens. This decoupled design allows the system to process each modality with specialized encoders optimized for that data type, while enabling scalability and modularity in the system architecture. Similarly, UniPoint-LLM [Liu et al.] introduces a Multimodal Universal Token Space (MUTS) that loosely connects point clouds and images through independent encoders and a shared mapping layer. This modular design allows easy integration of new modalities and simplified training by only requiring alignment between new modalities and text, rather than pairwise alignment between all modalities. The main benefits of loosely coupled architectures include greater modularity and flexibility in system design, easier integration of new modalities, and independent scaling of different components. However, this approach may result in less optimal joint representation learning, reduced real-time interaction capabilities, and potential information loss between modalities compared to tightly coupled approaches."
813
+ },
814
+ {
815
+ "type": "title",
816
+ "bbox": [
817
+ 0.51,
818
+ 0.791,
819
+ 0.638,
820
+ 0.804
821
+ ],
822
+ "angle": 0,
823
+ "content": "3.3.3 Discussion"
824
+ },
825
+ {
826
+ "type": "text",
827
+ "bbox": [
828
+ 0.508,
829
+ 0.806,
830
+ 0.914,
831
+ 0.89
832
+ ],
833
+ "angle": 0,
834
+ "content": "The choice between tightly and loosely coupled approaches presents important tradeoffs in multimodal spatial reasoning systems. Tightly coupled approaches like Point-Bind and JM3D offer stronger joint representation learning and real-time interaction capabilities through end-to-end training and shared feature spaces. This makes them particularly suitable"
835
+ }
836
+ ],
837
+ [
838
+ {
839
+ "type": "table",
840
+ "bbox": [
841
+ 0.087,
842
+ 0.066,
843
+ 0.913,
844
+ 0.396
845
+ ],
846
+ "angle": 0,
847
+ "content": "<table><tr><td></td><td>Model</td><td>Data Source</td><td>Alignment Type</td><td>Pre-training</td><td>Fine-tuning</td><td>Task</td><td>Code</td></tr><tr><td rowspan=\"11\">Image - based</td><td>LLaVA-3D [Zhu et al., 2024b]</td><td>Multi-view Images</td><td>-</td><td>✓</td><td>✓</td><td>3D VQA, 3D Scene Understanding</td><td>code</td></tr><tr><td>Agent3D-Zero [Zhang et al., 2024]</td><td>Multi-view Images</td><td>-</td><td>✓</td><td>✗</td><td>3D VQA, 3D Semantic Segmentation</td><td>✗</td></tr><tr><td>ShapeLLM [Qi et al., 2024a]</td><td>Multi-view Images</td><td>-</td><td>✓</td><td>✓</td><td>3D Object Classification, 3D Scene Captioning</td><td>code</td></tr><tr><td>Scene-LLM [Fu et al., 2024]</td><td>Multi-view Images</td><td>-</td><td>✓</td><td>✓</td><td>3D VQA, Dense Captioning</td><td>✗</td></tr><tr><td>SpatialPIN [Ma et al., 2024a]</td><td>RGB-D Images</td><td>-</td><td>✓</td><td>✗</td><td>3D Motion Planning, Task Video Generation</td><td>✗</td></tr><tr><td>LLMI3D [Yang et al., 2024]</td><td>Monocular Images</td><td>-</td><td>✓</td><td>✓</td><td>3D Grounding, 3D VQA</td><td>✗</td></tr><tr><td>Spatialvm [Chen et al., 2024a]</td><td>Monocular Images</td><td>-</td><td>✓</td><td>✓</td><td>Dense Reward Annotator, Spatial Data Generation</td><td>code</td></tr><tr><td>M3D-LaMed [Bai et al., 2024]</td><td>Medical Images</td><td>-</td><td>✓</td><td>✓</td><td>3D VQA, 3D VLP</td><td>code</td></tr><tr><td>HILT [Liu et al., 2024a]</td><td>Medical Images</td><td>-</td><td>✓</td><td>✓</td><td>3DHRG</td><td>✗</td></tr><tr><td>3D-CT-GPT [Chen et al., 2024b]</td><td>Medical Images</td><td>-</td><td>✓</td><td>✓</td><td>Radiology Report Generation, 3D VQA</td><td>✗</td></tr><tr><td>OpenMEDLab [Wang et al., 2024]</td><td>Medical Images</td><td>-</td><td>✓</td><td>✓</td><td>Medical Imaging</td><td>code</td></tr><tr><td rowspan=\"16\">Point Cloud - based</td><td>PointLLM [Xu et al., 2025]</td><td>Point Cloud</td><td>Direct Alignment</td><td>✓</td><td>✓</td><td>3D Object Classification, 3D Object Captioning</td><td>code</td></tr><tr><td>Chat-Scene [Huang et al., 2024]</td><td>Point Cloud</td><td>Direct Alignment</td><td>✓</td><td>✓</td><td>3D Visual Grounding, 3D Scene Captioning</td><td>code</td></tr><tr><td>PointCLIP [Zhang et al., 2022]</td><td>Point Cloud</td><td>Direct Alignment</td><td>✓</td><td>✓</td><td>3D Point Cloud Classification</td><td>code</td></tr><tr><td>PointCLIPv2 [Zhu et al., 2023]</td><td>Point Cloud</td><td>Direct Alignment</td><td>✓</td><td>✓</td><td>3D Point Cloud Classification</td><td>code</td></tr><tr><td>GPT4Point [Qi et al., 2024b]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Object Understanding</td><td>code</td></tr><tr><td>MiniGPT-3D [Tang et al., 2024a]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Object Classification, 3D Object Captioning</td><td>code</td></tr><tr><td>GreenPLM [Tang et al., 2024b]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Object Classification</td><td>code</td></tr><tr><td>Grounded 3D-LLM [Chen et al., 2024d]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Object Detection, 3D VQA</td><td>code</td></tr><tr><td>Lidar-LLM [Yang et al., 2023]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Captioning, 3D Grounding</td><td>code</td></tr><tr><td>3D-LLaVA [Deng et al., 2025]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D VQA, 3D 
Captioning</td><td>code</td></tr><tr><td>ScanReason [Zhu et al., 2024a]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D Reasoning Grounding</td><td>code</td></tr><tr><td>SegPoint [He et al., 2024]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D Instruction Segmentation</td><td>✗</td></tr><tr><td>Kestrel [Fei et al., 2024]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>Part-Aware Point Grounding</td><td>✗</td></tr><tr><td>SIG3D [Man et al., 2024]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>Situation Estimation</td><td>code</td></tr><tr><td>Chat-3D [Wang et al., 2023]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D VQA</td><td>code</td></tr><tr><td>LL3DA [Chen et al., 2024c]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D Dense Captioning</td><td>code</td></tr><tr><td rowspan=\"6\">Hybrid - based</td><td>Point-bind [Guo et al., 2023]</td><td>Point cloud, Image</td><td>Tightly Coupled</td><td>✓</td><td>✓</td><td>3D Cross-modal Retrieval, Any-to-3D Generation</td><td>code</td></tr><tr><td>JM3D [Ji et al., 2024]</td><td>Point cloud, Image</td><td>Tightly Coupled</td><td>✓</td><td>✓</td><td>Image-3D Retrieval, 3D Part Segmentation</td><td>code</td></tr><tr><td>Uni3D [Zhou et al., 2023]</td><td>Point cloud, Image</td><td>Tightly Coupled</td><td>✓</td><td>✓</td><td>Zero-shot Shape Classification</td><td>code</td></tr><tr><td>Uni3D-LLM [Liu et al., 2024b]</td><td>Point cloud, Image</td><td>Tightly Coupled</td><td>✓</td><td>✓</td><td>3D VQA</td><td>✗</td></tr><tr><td>MultiPLY [Hong et al., 2024]</td><td>Point cloud, Image</td><td>Loosely Coupled</td><td>✓</td><td>✓</td><td>Object retrieval</td><td>code</td></tr><tr><td>UniPoint-LLM [Liu et al.]</td><td>Point cloud, Image</td><td>Loosely Coupled</td><td>✓</td><td>✓</td><td>3D generation, 3D VQA</td><td>✗</td></tr></table>"
848
+ },
849
+ {
850
+ "type": "table_caption",
851
+ "bbox": [
852
+ 0.082,
853
+ 0.405,
854
+ 0.914,
855
+ 0.459
856
+ ],
857
+ "angle": 0,
858
+ "content": "Table 1: Taxonomy of Large Language Models with spatial reasoning capability. This table presents a comprehensive comparison of various 3D vision-language models categorized by their input modalities (image-based, point cloud-based, and hybrid-based), showing their data sources, alignment types, training strategies (pre-training and fine-tuning), primary tasks, and code availability. The models are organized into three main categories based on their input type: image-based models, point cloud-based models, and hybrid models that utilize both modalities."
859
+ },
860
+ {
861
+ "type": "text",
862
+ "bbox": [
863
+ 0.082,
864
+ 0.481,
865
+ 0.487,
866
+ 0.718
867
+ ],
868
+ "angle": 0,
869
+ "content": "for applications requiring detailed spatial understanding and precise control. However, they can be more complex to train and scale, and adding new modalities may require significant architectural changes. In contrast, loosely coupled approaches like MultiPLY and UniPoint-LLM provide greater modularity and flexibility, making them easier to extend and maintain. They allow independent optimization of different components and simplified training procedures, but may sacrifice some performance in tasks requiring fine-grained cross-modal understanding. The optimal choice ultimately depends on specific application requirements - tightly coupled architectures may be preferred for specialized high-performance systems, while loosely coupled designs better suit general-purpose platforms prioritizing extensibility and maintainability. Future work may explore hybrid approaches that combine the benefits of both paradigms, potentially using adaptive coupling mechanisms that adjust based on task demands."
870
+ },
871
+ {
872
+ "type": "title",
873
+ "bbox": [
874
+ 0.084,
875
+ 0.735,
876
+ 0.225,
877
+ 0.753
878
+ ],
879
+ "angle": 0,
880
+ "content": "4 Applications"
881
+ },
882
+ {
883
+ "type": "text",
884
+ "bbox": [
885
+ 0.082,
886
+ 0.758,
887
+ 0.488,
888
+ 0.857
889
+ ],
890
+ "angle": 0,
891
+ "content": "A key research focus leverages LLMs to enhance robotic embodied intelligence, enabling machines to interpret natural language commands for real-world tasks. This includes robotic control, navigation, and manipulation, where LLMs parse instructions, generate action plans, and adapt to dynamic environments—for instance, guiding robots to locate objects in cluttered spaces using text-based prompts."
892
+ },
893
+ {
894
+ "type": "text",
895
+ "bbox": [
896
+ 0.084,
897
+ 0.861,
898
+ 0.49,
899
+ 0.892
900
+ ],
901
+ "angle": 0,
902
+ "content": "3D Scene Understanding. Advanced 3D scene analysis integrates multimodal data (e.g., images, point clouds, text) for"
903
+ },
904
+ {
905
+ "type": "text",
906
+ "bbox": [
907
+ 0.508,
908
+ 0.481,
909
+ 0.914,
910
+ 0.567
911
+ ],
912
+ "angle": 0,
913
+ "content": "tasks like open-vocabulary segmentation, semantic mapping, and spatial reasoning. Central to this is 3D visual question answering (3D-VQA), requiring models to interpret queries about object attributes, spatial relationships, or contextual roles within scenes. Context-aware systems further account for user perspectives to deliver precise responses."
914
+ },
915
+ {
916
+ "type": "text",
917
+ "bbox": [
918
+ 0.508,
919
+ 0.573,
920
+ 0.915,
921
+ 0.687
922
+ ],
923
+ "angle": 0,
924
+ "content": "Cross-Domain Applications. In healthcare, LLMs analyze volumetric medical scans (e.g., CT) for lesion detection and automated diagnostics. Autonomous driving systems utilize 3D-capable LLMs to interpret traffic scenes, aiding object detection [Zha et al., 2023, 2024] and path planning. Design-oriented applications include generating indoor layouts from textual requirements, while educational tools employ interactive 3D environments to teach spatial concepts."
925
+ },
926
+ {
927
+ "type": "title",
928
+ "bbox": [
929
+ 0.51,
930
+ 0.716,
931
+ 0.825,
932
+ 0.733
933
+ ],
934
+ "angle": 0,
935
+ "content": "5 Challenges and Future Directions"
936
+ },
937
+ {
938
+ "type": "text",
939
+ "bbox": [
940
+ 0.508,
941
+ 0.75,
942
+ 0.915,
943
+ 0.891
944
+ ],
945
+ "angle": 0,
946
+ "content": "Table 1 summarizes the models that leverage LLMs to assist graph-related tasks according to the proposed taxonomy. Based on the above review and analysis, we believe that there is still much space for further enhancement in this field. Recent advances in integrating LLMs with three-dimensional (3D) data have demonstrated considerable promise. However, numerous challenges must still be overcome to realize robust and practical 3D-aware LLMs. Below, we summarize these obstacles and then outline potential pathways to address them, highlighting key research directions for the future."
947
+ }
948
+ ],
949
+ [
950
+ {
951
+ "type": "title",
952
+ "bbox": [
953
+ 0.086,
954
+ 0.069,
955
+ 0.212,
956
+ 0.084
957
+ ],
958
+ "angle": 0,
959
+ "content": "5.1 Challenges"
960
+ },
961
+ {
962
+ "type": "text",
963
+ "bbox": [
964
+ 0.087,
965
+ 0.096,
966
+ 0.486,
967
+ 0.22
968
+ ],
969
+ "angle": 0,
970
+ "content": "Weak Spatial Reasoning and Representation. Multimodal LLMs (MLLMs) exhibit limited acuity in 3D spatial understanding, struggling with fine-grained relationships (e.g., front/back distinctions, occluded object localization) and precise geometric outputs (distances, angles). These issues stem partly from mismatches between unstructured point clouds and sequence-based LLM architectures, where high-dimensional 3D data incur prohibitive token counts or oversimplified encodings."
971
+ },
972
+ {
973
+ "type": "text",
974
+ "bbox": [
975
+ 0.087,
976
+ 0.226,
977
+ 0.486,
978
+ 0.42
979
+ ],
980
+ "angle": 0,
981
+ "content": "Data and Evaluation Gaps. Progress in 3D-aware LLMs is hindered by the scarcity of high-quality 3D-text paired datasets. Unlike the abundant resources for 2D images and video, the 3D domain lacks standardized, richly annotated datasets crucial for training robust models. Existing benchmarks focus mainly on discriminative tasks like classification and retrieval—emphasizing category differentiation rather than generating rich, descriptive 3D scene outputs. Consequently, evaluations often rely on subjective metrics (e.g., human or GPT-based judgments) that can lack consistency. Advancing the field requires developing objective, comprehensive benchmarks that assess both open-vocabulary generation and the spatial plausibility of descriptions relative to the underlying 3D structure."
982
+ },
983
+ {
984
+ "type": "text",
985
+ "bbox": [
986
+ 0.087,
987
+ 0.426,
988
+ 0.486,
989
+ 0.648
990
+ ],
991
+ "angle": 0,
992
+ "content": "Multimodal Integration and Generalization. Fusing 3D data (e.g., point clouds) with other modalities like 2D imagery, audio, or text poses significant challenges due to their distinct structural characteristics. The conversion and alignment of high-dimensional 3D data with lower-dimensional representations can lead to a loss of intricate details, diluting the original 3D richness. Moreover, current models often struggle with open-vocabulary recognition, limiting their ability to identify or describe objects outside of their training data—especially when encountering unseen scenes or novel objects. This difficulty is further compounded by the variability of natural language, from colloquial expressions to domain-specific terminology, and by noisy inputs. Thus, more sophisticated multimodal integration techniques and generalization strategies are needed to preserve geometric fidelity while accommodating diverse, unpredictable inputs."
993
+ },
994
+ {
995
+ "type": "text",
996
+ "bbox": [
997
+ 0.087,
998
+ 0.653,
999
+ 0.486,
1000
+ 0.75
1001
+ ],
1002
+ "angle": 0,
1003
+ "content": "Complex Task Definition. While 3D-aware LLMs excel in controlled settings, they lack frameworks for nuanced language-context inference in dynamic environments. Task decomposition and scalable encoding methods are needed to balance geometric fidelity with computational tractability, particularly for interactive applications requiring real-time spatial reasoning."
1004
+ },
1005
+ {
1006
+ "type": "title",
1007
+ "bbox": [
1008
+ 0.087,
1009
+ 0.765,
1010
+ 0.263,
1011
+ 0.779
1012
+ ],
1013
+ "angle": 0,
1014
+ "content": "5.2 Future Directions"
1015
+ },
1016
+ {
1017
+ "type": "text",
1018
+ "bbox": [
1019
+ 0.087,
1020
+ 0.792,
1021
+ 0.486,
1022
+ 0.889
1023
+ ],
1024
+ "angle": 0,
1025
+ "content": "Enhancing 3D Perception and Representations. Addressing spatial reasoning gaps requires richer 3D-text datasets (e.g., from robotics, gaming, autonomous driving) and model architectures that encode geometric relationships. Multi-view data and robust depth cues can improve orientation, distance, and occlusion estimation. Compact 3D tokens and refined encoding/decoding methods may bridge unstructured point"
1026
+ },
1027
+ {
1028
+ "type": "text",
1029
+ "bbox": [
1030
+ 0.514,
1031
+ 0.07,
1032
+ 0.912,
1033
+ 0.097
1034
+ ],
1035
+ "angle": 0,
1036
+ "content": "clouds with sequence-based models, enabling fine-grained spatial understanding and generation."
1037
+ },
1038
+ {
1039
+ "type": "text",
1040
+ "bbox": [
1041
+ 0.514,
1042
+ 0.102,
1043
+ 0.913,
1044
+ 0.269
1045
+ ],
1046
+ "angle": 0,
1047
+ "content": "Multi-Modal Fusion and Instruction Understanding. Tighter integration of modalities (point clouds, images, text, audio) via unified latent spaces or attention mechanisms could preserve subtle geometric and semantic details. Enhanced instruction processing—including hierarchical task decomposition, contextual interpretation, and robustness to dialects/terminology—would improve compositional reasoning in 3D environments and broaden real-world applicability. Furthermore, by leveraging these integrated representations, models can more adeptly adapt to complex instructions and novel scenarios, ultimately paving the way for more robust and versatile 3D reasoning systems."
1048
+ },
1049
+ {
1050
+ "type": "text",
1051
+ "bbox": [
1052
+ 0.514,
1053
+ 0.274,
1054
+ 0.913,
1055
+ 0.385
1056
+ ],
1057
+ "angle": 0,
1058
+ "content": "Cross-Scene Generalization and Robust Evaluation. Open-vocabulary 3D understanding demands large-scale pretraining on diverse scenes and transfer/lifelong learning paradigms for adapting to novel objects or environments. This understanding extends beyond predefined categories to generalize to unseen objects and scenes. For instance, models need to comprehend \"an old rocking chair\" even if this specific type of chair never appeared in the training data."
1059
+ },
1060
+ {
1061
+ "type": "text",
1062
+ "bbox": [
1063
+ 0.514,
1064
+ 0.39,
1065
+ 0.913,
1066
+ 0.487
1067
+ ],
1068
+ "angle": 0,
1069
+ "content": "Expanding Applications for Autonomous Systems. 3D-aware LLMs hold potential in robotics (navigation, manipulation), medical imaging (lesion detection), architectural design, and interactive education. Future systems may integrate environmental constraints, user perspectives, and object affordances for autonomous planning and decision-making in dynamic 3D contexts."
1070
+ },
1071
+ {
1072
+ "type": "text",
1073
+ "bbox": [
1074
+ 0.514,
1075
+ 0.488,
1076
+ 0.913,
1077
+ 0.599
1078
+ ],
1079
+ "angle": 0,
1080
+ "content": "Collectively, these challenges and potential directions underscore the field's rapid evolution and its equally significant open questions. Moving forward, more robust 3D-specific data resources, better model architectures, and more refined evaluation protocols will be essential to unlock the full potential of LLMs in three-dimensional settings—and ultimately bring intelligent, multimodal understanding closer to real-world deployment."
1081
+ },
1082
+ {
1083
+ "type": "title",
1084
+ "bbox": [
1085
+ 0.514,
1086
+ 0.617,
1087
+ 0.637,
1088
+ 0.632
1089
+ ],
1090
+ "angle": 0,
1091
+ "content": "6 Conclusion"
1092
+ },
1093
+ {
1094
+ "type": "text",
1095
+ "bbox": [
1096
+ 0.514,
1097
+ 0.639,
1098
+ 0.913,
1099
+ 0.889
1100
+ ],
1101
+ "angle": 0,
1102
+ "content": "The integration of LLMs with 3D data is a dynamic research area. This survey categorized 3D-LLM research into image-based, point cloud-based, and hybrid modality-based spatial reasoning. It reviewed state-of-the-art methods, their applications in multiple fields, and associated challenges. Notably, image-based methods have data-related advantages but face issues like depth information shortage. Point cloud-based methods offer precise 3D details but encounter data-handling difficulties. Hybrid methods combine strengths yet struggle with data alignment. Applications are diverse, but challenges such as weak spatial perception, data scarcity, and evaluation problems exist. Future research should focus on enhancing 3D perception, improving multi-modal fusion, expanding generalization, developing evaluation metrics, enhancing instruction understanding, optimizing 3D representations, and exploring continuous learning. By addressing these, we can unlock the full potential of 3D-aware LLMs for real-world deployment and industry advancement."
1103
+ }
1104
+ ],
1105
+ [
1106
+ {
1107
+ "type": "title",
1108
+ "bbox": [
1109
+ 0.085,
1110
+ 0.068,
1111
+ 0.181,
1112
+ 0.083
1113
+ ],
1114
+ "angle": 0,
1115
+ "content": "References"
1116
+ },
1117
+ {
1118
+ "type": "ref_text",
1119
+ "bbox": [
1120
+ 0.086,
1121
+ 0.087,
1122
+ 0.487,
1123
+ 0.142
1124
+ ],
1125
+ "angle": 0,
1126
+ "content": "Fan Bai, Yuxin Du, Tiejun Huang, Max Q-H Meng, and Bo Zhao. M3d: Advancing 3d medical image analysis with multi-modal large language models. arXiv preprint arXiv:2404.00578, 2024."
1127
+ },
1128
+ {
1129
+ "type": "ref_text",
1130
+ "bbox": [
1131
+ 0.086,
1132
+ 0.147,
1133
+ 0.488,
1134
+ 0.23
1135
+ ],
1136
+ "angle": 0,
1137
+ "content": "Boyuan Chen, Zhuo Xu, Sean Kirmani, Brain Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14455-14465, 2024."
1138
+ },
1139
+ {
1140
+ "type": "ref_text",
1141
+ "bbox": [
1142
+ 0.086,
1143
+ 0.234,
1144
+ 0.487,
1145
+ 0.303
1146
+ ],
1147
+ "angle": 0,
1148
+ "content": "Hao Chen, Wei Zhao, Yingli Li, Tianyang Zhong, Yisong Wang, Youlan Shang, Lei Guo, Junwei Han, Tianming Liu, Jun Liu, et al. 3d-ct-gpt: Generating 3d radiology reports through integration of large vision-language models. arXiv preprint arXiv:2409.19330, 2024."
1149
+ },
1150
+ {
1151
+ "type": "ref_text",
1152
+ "bbox": [
1153
+ 0.086,
1154
+ 0.307,
1155
+ 0.487,
1156
+ 0.391
1157
+ ],
1158
+ "angle": 0,
1159
+ "content": "Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. Ll3da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26428-26438, 2024."
1160
+ },
1161
+ {
1162
+ "type": "ref_text",
1163
+ "bbox": [
1164
+ 0.086,
1165
+ 0.394,
1166
+ 0.487,
1167
+ 0.449
1168
+ ],
1169
+ "angle": 0,
1170
+ "content": "Yilun Chen, Shuai Yang, Haifeng Huang, Tai Wang, Ruiyuan Lyu, Runsen Xu, Dahua Lin, and Jiangmiao Pang. Grounded 3d-llm with referent tokens. arXiv preprint arXiv:2405.10370, 2024."
1171
+ },
1172
+ {
1173
+ "type": "ref_text",
1174
+ "bbox": [
1175
+ 0.086,
1176
+ 0.454,
1177
+ 0.487,
1178
+ 0.509
1179
+ ],
1180
+ "angle": 0,
1181
+ "content": "Jiajun Deng, Tianyu He, Li Jiang, Tianyu Wang, Feras Dayoub, and Ian Reid. 3d-llava: Towards generalist 3d lmm's with omni superpoint transformer. arXiv preprint arXiv:2501.01163, 2025."
1182
+ },
1183
+ {
1184
+ "type": "ref_text",
1185
+ "bbox": [
1186
+ 0.086,
1187
+ 0.513,
1188
+ 0.487,
1189
+ 0.569
1190
+ ],
1191
+ "angle": 0,
1192
+ "content": "Junjie Fei, Mahmoud Ahmed, Jian Ding, Eslam Mohamed Bakr, and Mohamed Elhoseiny. Kestrel: Point grounding multimodal llm for part-aware 3d vision-language understanding. arXiv preprint arXiv:2405.18937, 2024."
1193
+ },
1194
+ {
1195
+ "type": "ref_text",
1196
+ "bbox": [
1197
+ 0.086,
1198
+ 0.573,
1199
+ 0.487,
1200
+ 0.627
1201
+ ],
1202
+ "angle": 0,
1203
+ "content": "Rao Fu, Jingyu Liu, Xilun Chen, Yixin Nie, and Wenhan Xiong. Scene-llm: Extending language model for 3d visual understanding and reasoning. arXiv preprint arXiv:2403.11401, 2024."
1204
+ },
1205
+ {
1206
+ "type": "ref_text",
1207
+ "bbox": [
1208
+ 0.086,
1209
+ 0.632,
1210
+ 0.487,
1211
+ 0.701
1212
+ ],
1213
+ "angle": 0,
1214
+ "content": "Chen Gao, Baining Zhao, Weichen Zhang, Jinzhu Mao, Jun Zhang, Zhiheng Zheng, Fanhang Man, Jianjie Fang, Zile Zhou, Jinqiang Cui, et al. Embodiedcity: A benchmark platform for embodied agent in real-world city environment. arXiv preprint arXiv:2410.09604, 2024."
1215
+ },
1216
+ {
1217
+ "type": "ref_text",
1218
+ "bbox": [
1219
+ 0.086,
1220
+ 0.705,
1221
+ 0.487,
1222
+ 0.801
1223
+ ],
1224
+ "angle": 0,
1225
+ "content": "Qiao Gu, Ali Kuwajerwala, Sacha Morin, Krishna Murthy Jatavallabhula, Bipasha Sen, Aditya Agarwal, Corban Rivera, William Paul, Kirsty Ellis, Rama Chellappa, et al. Conceptgraphs: Open-vocabulary 3d scene graphs for perception and planning. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 5021-5028. IEEE, 2024."
1226
+ },
1227
+ {
1228
+ "type": "ref_text",
1229
+ "bbox": [
1230
+ 0.086,
1231
+ 0.806,
1232
+ 0.487,
1233
+ 0.889
1234
+ ],
1235
+ "angle": 0,
1236
+ "content": "Ziyu Guo, Renrui Zhang, Xiangyang Zhu, Yiwen Tang, Xi-anzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xi-anzhi Li, Hongsheng Li, et al. Point-bind & point-llm: Aligning point cloud with multi-modality for 3d understanding, generation, and instruction following. arXiv preprint arXiv:2309.00615, 2023."
1237
+ },
1238
+ {
1239
+ "type": "list",
1240
+ "bbox": [
1241
+ 0.086,
1242
+ 0.087,
1243
+ 0.488,
1244
+ 0.889
1245
+ ],
1246
+ "angle": 0,
1247
+ "content": null
1248
+ },
1249
+ {
1250
+ "type": "ref_text",
1251
+ "bbox": [
1252
+ 0.513,
1253
+ 0.07,
1254
+ 0.913,
1255
+ 0.125
1256
+ ],
1257
+ "angle": 0,
1258
+ "content": "Shuting He, Henghui Ding, Xudong Jiang, and Bihan Wen. Segpoint: Segment any point cloud via large language model. In European Conference on Computer Vision, pages 349-367. Springer, 2024."
1259
+ },
1260
+ {
1261
+ "type": "ref_text",
1262
+ "bbox": [
1263
+ 0.513,
1264
+ 0.13,
1265
+ 0.913,
1266
+ 0.198
1267
+ ],
1268
+ "angle": 0,
1269
+ "content": "Yining Hong, Haoyu Zhen, Peihao Chen, Shuhong Zheng, Yilun Du, Zhenfang Chen, and Chuang Gan. 3d-llm: Injecting the 3d world into large language models. Advances in Neural Information Processing Systems, 36:20482-20494, 2023."
1270
+ },
1271
+ {
1272
+ "type": "ref_text",
1273
+ "bbox": [
1274
+ 0.513,
1275
+ 0.205,
1276
+ 0.913,
1277
+ 0.274
1278
+ ],
1279
+ "angle": 0,
1280
+ "content": "Yining Hong, Zishuo Zheng, Peihao Chen, Yian Wang, Junyan Li, and Chuang Gan. Multiply: A multisensory object-centric embodied large language model in 3d world. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26406-26416, 2024."
1281
+ },
1282
+ {
1283
+ "type": "ref_text",
1284
+ "bbox": [
1285
+ 0.513,
1286
+ 0.279,
1287
+ 0.913,
1288
+ 0.362
1289
+ ],
1290
+ "angle": 0,
1291
+ "content": "Haifeng Huang, Yilun Chen, Zehan Wang, Rongjie Huang, Runsen Xu, Tai Wang, Luping Liu, Xize Cheng, Yang Zhao, Jiangmiao Pang, et al. Chat-scene: Bridging 3d scene and large language models with object identifiers. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024."
1292
+ },
1293
+ {
1294
+ "type": "ref_text",
1295
+ "bbox": [
1296
+ 0.513,
1297
+ 0.367,
1298
+ 0.913,
1299
+ 0.424
1300
+ ],
1301
+ "angle": 0,
1302
+ "content": "Jiayi Ji, Haowei Wang, Changli Wu, Yiwei Ma, Xiaoshuai Sun, and Rongrong Ji. Jm3d & jm3d-llm: Elevating 3d representation with joint multi-modal cues. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024."
1303
+ },
1304
+ {
1305
+ "type": "ref_text",
1306
+ "bbox": [
1307
+ 0.513,
1308
+ 0.429,
1309
+ 0.913,
1310
+ 0.498
1311
+ ],
1312
+ "angle": 0,
1313
+ "content": "Baoxiong Jia, Yixin Chen, Huangyue Yu, Yan Wang, Xuesong Niu, Tengyu Liu, Qing Li, and Siyuan Huang. Sceneverse: Scaling 3d vision-language learning for grounded scene understanding. In European Conference on Computer Vision, pages 289-310. Springer, 2024."
1314
+ },
1315
+ {
1316
+ "type": "ref_text",
1317
+ "bbox": [
1318
+ 0.513,
1319
+ 0.503,
1320
+ 0.913,
1321
+ 0.572
1322
+ ],
1323
+ "angle": 0,
1324
+ "content": "Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2019."
1325
+ },
1326
+ {
1327
+ "type": "ref_text",
1328
+ "bbox": [
1329
+ 0.513,
1330
+ 0.577,
1331
+ 0.913,
1332
+ 0.646
1333
+ ],
1334
+ "angle": 0,
1335
+ "content": "Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International conference on machine learning, pages 12888-12900. PMLR, 2022."
1336
+ },
1337
+ {
1338
+ "type": "ref_text",
1339
+ "bbox": [
1340
+ 0.513,
1341
+ 0.651,
1342
+ 0.913,
1343
+ 0.708
1344
+ ],
1345
+ "angle": 0,
1346
+ "content": "Dingning Liu, Xiaoshui Huang, Zhihui Wang, Zhenfei Yin, Peng Gao, Yujiao Wu, Yuenan Hou, Xinzhu Ma, and Wanli Ouyang. Pointmllm: Aligning multi-modality with llm for point cloud understanding, generation and editing."
1347
+ },
1348
+ {
1349
+ "type": "ref_text",
1350
+ "bbox": [
1351
+ 0.513,
1352
+ 0.713,
1353
+ 0.913,
1354
+ 0.781
1355
+ ],
1356
+ "angle": 0,
1357
+ "content": "Che Liu, Zhongwei Wan, Yuqi Wang, Hui Shen, Haozhe Wang, Kangyu Zheng, Mi Zhang, and Rossella Arcucci. Benchmarking and boosting radiology report generation for 3d high-resolution medical images. arXiv preprint arXiv:2406.07146, 2024."
1358
+ },
1359
+ {
1360
+ "type": "ref_text",
1361
+ "bbox": [
1362
+ 0.513,
1363
+ 0.787,
1364
+ 0.913,
1365
+ 0.856
1366
+ ],
1367
+ "angle": 0,
1368
+ "content": "Dingning Liu, Xiaoshui Huang, Yuenan Hou, Zhihui Wang, Zhenfei Yin, Yongshun Gong, Peng Gao, and Wanli Ouyang. Uni3d-llm: Unifying point cloud perception, generation and editing with large language models. arXiv preprint arXiv:2402.03327, 2024."
1369
+ },
1370
+ {
1371
+ "type": "ref_text",
1372
+ "bbox": [
1373
+ 0.513,
1374
+ 0.861,
1375
+ 0.913,
1376
+ 0.89
1377
+ ],
1378
+ "angle": 0,
1379
+ "content": "Chenyang Ma, Kai Lu, Ta-Ying Cheng, Niki Trigoni, and Andrew Markham. Spatialpin: Enhancing spatial reasoning"
1380
+ },
1381
+ {
1382
+ "type": "list",
1383
+ "bbox": [
1384
+ 0.513,
1385
+ 0.07,
1386
+ 0.913,
1387
+ 0.89
1388
+ ],
1389
+ "angle": 0,
1390
+ "content": null
1391
+ }
1392
+ ],
1393
+ [
1394
+ {
1395
+ "type": "ref_text",
1396
+ "bbox": [
1397
+ 0.102,
1398
+ 0.069,
1399
+ 0.489,
1400
+ 0.111
1401
+ ],
1402
+ "angle": 0,
1403
+ "content": "capabilities of vision-language models through prompting and interacting 3d priors. arXiv preprint arXiv:2403.13438, 2024."
1404
+ },
1405
+ {
1406
+ "type": "ref_text",
1407
+ "bbox": [
1408
+ 0.086,
1409
+ 0.117,
1410
+ 0.49,
1411
+ 0.201
1412
+ ],
1413
+ "angle": 0,
1414
+ "content": "Xianzheng Ma, Yash Bhalgat, Brandon Smart, Shuai Chen, Xinghui Li, Jian Ding, Jindong Gu, Dave Zhenyu Chen, Songyou Peng, Jia-Wang Bian, et al. When llms step into the 3d world: A survey and meta-analysis of 3d tasks via multi-modal large language models. arXiv preprint arXiv:2405.10255, 2024."
1415
+ },
1416
+ {
1417
+ "type": "ref_text",
1418
+ "bbox": [
1419
+ 0.085,
1420
+ 0.207,
1421
+ 0.49,
1422
+ 0.263
1423
+ ],
1424
+ "angle": 0,
1425
+ "content": "Yuexin Ma, Tai Wang, Xuyang Bai, Huitong Yang, Yuenan Hou, Yaming Wang, Yu Qiao, Ruigang Yang, and Xinge Zhu. Vision-centric bev perception: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024."
1426
+ },
1427
+ {
1428
+ "type": "ref_text",
1429
+ "bbox": [
1430
+ 0.086,
1431
+ 0.269,
1432
+ 0.489,
1433
+ 0.325
1434
+ ],
1435
+ "angle": 0,
1436
+ "content": "Yunze Man, Liang-Yan Gui, and Yu-Xiong Wang. Situational awareness matters in 3d vision language reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13678-13688, 2024."
1437
+ },
1438
+ {
1439
+ "type": "ref_text",
1440
+ "bbox": [
1441
+ 0.086,
1442
+ 0.331,
1443
+ 0.49,
1444
+ 0.401
1445
+ ],
1446
+ "angle": 0,
1447
+ "content": "Zekun Qi, Runpei Dong, Shaochen Zhang, Haoran Geng, Chunrui Han, Zheng Ge, Li Yi, and Kaiheng Ma. Shapellm: Universal 3d object understanding for embodied interaction. In European Conference on Computer Vision, pages 214-238. Springer, 2024."
1448
+ },
1449
+ {
1450
+ "type": "ref_text",
1451
+ "bbox": [
1452
+ 0.086,
1453
+ 0.407,
1454
+ 0.49,
1455
+ 0.49
1456
+ ],
1457
+ "angle": 0,
1458
+ "content": "Zhangyang Qi, Ye Fang, Zeyi Sun, Xiaoyang Wu, Tong Wu, Jiaqi Wang, Dahua Lin, and Hengshuang Zhao. Gpt4point: A unified framework for point-language understanding and generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26417-26427, 2024."
1459
+ },
1460
+ {
1461
+ "type": "ref_text",
1462
+ "bbox": [
1463
+ 0.086,
1464
+ 0.496,
1465
+ 0.49,
1466
+ 0.58
1467
+ ],
1468
+ "angle": 0,
1469
+ "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021."
1470
+ },
1471
+ {
1472
+ "type": "ref_text",
1473
+ "bbox": [
1474
+ 0.086,
1475
+ 0.586,
1476
+ 0.489,
1477
+ 0.614
1478
+ ],
1479
+ "angle": 0,
1480
+ "content": "Alec Radford. Improving language understanding by generative pre-training. 2018."
1481
+ },
1482
+ {
1483
+ "type": "ref_text",
1484
+ "bbox": [
1485
+ 0.086,
1486
+ 0.62,
1487
+ 0.489,
1488
+ 0.69
1489
+ ],
1490
+ "angle": 0,
1491
+ "content": "Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Yixue Hao, Long Hu, and Min Chen. Minigpt-3d: Efficiently aligning 3d point clouds with large language models using 2d priors. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 6617-6626, 2024."
1492
+ },
1493
+ {
1494
+ "type": "ref_text",
1495
+ "bbox": [
1496
+ 0.086,
1497
+ 0.696,
1498
+ 0.489,
1499
+ 0.753
1500
+ ],
1501
+ "angle": 0,
1502
+ "content": "Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Jinfeng Xu, Yixue Hao, Long Hu, and Min Chen. More text, less point: Towards 3d data-efficient point-language understanding. arXiv preprint arXiv:2408.15966, 2024."
1503
+ },
1504
+ {
1505
+ "type": "ref_text",
1506
+ "bbox": [
1507
+ 0.086,
1508
+ 0.758,
1509
+ 0.489,
1510
+ 0.813
1511
+ ],
1512
+ "angle": 0,
1513
+ "content": "Zehan Wang, Haifeng Huang, Yang Zhao, Ziang Zhang, and Zhou Zhao. Chat-3d: Data-efficiently tuning large language model for universal dialogue of 3d scenes. arXiv preprint arXiv:2308.08769, 2023."
1514
+ },
1515
+ {
1516
+ "type": "ref_text",
1517
+ "bbox": [
1518
+ 0.086,
1519
+ 0.819,
1520
+ 0.49,
1521
+ 0.89
1522
+ ],
1523
+ "angle": 0,
1524
+ "content": "Xiaosong Wang, Xiaofan Zhang, Guotai Wang, Junjun He, Zhongyu Li, Wentao Zhu, Yi Guo, Qi Dou, Xiaoxiao Li, Dequan Wang, et al. Openmedlab: An open-source platform for multi-modality foundation models in medicine. arXiv preprint arXiv:2402.18028, 2024."
1525
+ },
1526
+ {
1527
+ "type": "list",
1528
+ "bbox": [
1529
+ 0.085,
1530
+ 0.069,
1531
+ 0.49,
1532
+ 0.89
1533
+ ],
1534
+ "angle": 0,
1535
+ "content": null
1536
+ },
1537
+ {
1538
+ "type": "ref_text",
1539
+ "bbox": [
1540
+ 0.513,
1541
+ 0.069,
1542
+ 0.914,
1543
+ 0.138
1544
+ ],
1545
+ "angle": 0,
1546
+ "content": "Runsen Xu, Xiaolong Wang, Tai Wang, Yilun Chen, Jiangmiao Pang, and Dahua Lin. Pointllm: Empowering large language models to understand point clouds. In European Conference on Computer Vision, pages 131-147. Springer, 2025."
1547
+ },
1548
+ {
1549
+ "type": "ref_text",
1550
+ "bbox": [
1551
+ 0.512,
1552
+ 0.144,
1553
+ 0.914,
1554
+ 0.213
1555
+ ],
1556
+ "angle": 0,
1557
+ "content": "Senqiao Yang, Jiaming Liu, Ray Zhang, Mingjie Pan, Zoey Guo, Xiaqi Li, Zehui Chen, Peng Gao, Yandong Guo, and Shanghang Zhang. Lidar-llm: Exploring the potential of large language models for 3d lidar understanding. arXiv preprint arXiv:2312.14074, 2023."
1558
+ },
1559
+ {
1560
+ "type": "ref_text",
1561
+ "bbox": [
1562
+ 0.512,
1563
+ 0.218,
1564
+ 0.914,
1565
+ 0.287
1566
+ ],
1567
+ "angle": 0,
1568
+ "content": "Fan Yang, Sicheng Zhao, Yanhao Zhang, Haoxiang Chen, Hui Chen, Wenbo Tang, Haonan Lu, Pengfei Xu, Zhenyu Yang, Jungong Han, et al. Llmi3d: Empowering llm with 3d perception from a single 2d image. arXiv preprint arXiv:2408.07422, 2024."
1569
+ },
1570
+ {
1571
+ "type": "ref_text",
1572
+ "bbox": [
1573
+ 0.512,
1574
+ 0.292,
1575
+ 0.915,
1576
+ 0.362
1577
+ ],
1578
+ "angle": 0,
1579
+ "content": "Zhihao Yuan, Jinke Ren, Chun-Mei Feng, Hengshuang Zhao, Shuguang Cui, and Zhen Li. Visual programming for zero-shot open-vocabulary 3d visual grounding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20623-20633, 2024."
1580
+ },
1581
+ {
1582
+ "type": "ref_text",
1583
+ "bbox": [
1584
+ 0.512,
1585
+ 0.366,
1586
+ 0.915,
1587
+ 0.422
1588
+ ],
1589
+ "angle": 0,
1590
+ "content": "Jirong Zha, Liang Han, Xiwang Dong, and Zhang Ren. Privacy-preserving push-sum distributed cubature information filter for nonlinear target tracking with switching directed topologies. ISA transactions, 136:16-30, 2023."
1591
+ },
1592
+ {
1593
+ "type": "ref_text",
1594
+ "bbox": [
1595
+ 0.512,
1596
+ 0.427,
1597
+ 0.914,
1598
+ 0.481
1599
+ ],
1600
+ "angle": 0,
1601
+ "content": "Jirong Zha, Nan Zhou, Zhenyu Liu, Tao Sun, and Xinlei Chen. Diffusion-based filter for fast and accurate collaborative tracking with low data transmission. Authorea Preprints, 2024."
1602
+ },
1603
+ {
1604
+ "type": "ref_text",
1605
+ "bbox": [
1606
+ 0.512,
1607
+ 0.487,
1608
+ 0.914,
1609
+ 0.557
1610
+ ],
1611
+ "angle": 0,
1612
+ "content": "Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8552-8562, 2022."
1613
+ },
1614
+ {
1615
+ "type": "ref_text",
1616
+ "bbox": [
1617
+ 0.512,
1618
+ 0.562,
1619
+ 0.914,
1620
+ 0.63
1621
+ ],
1622
+ "angle": 0,
1623
+ "content": "Sha Zhang, Di Huang, Jiajun Deng, Shixiang Tang, Wanli Ouyang, Tong He, and Yanyong Zhang. Agent3d-zero: An agent for zero-shot 3d understanding. In European Conference on Computer Vision, pages 186-202. Springer, 2024."
1624
+ },
1625
+ {
1626
+ "type": "ref_text",
1627
+ "bbox": [
1628
+ 0.512,
1629
+ 0.636,
1630
+ 0.914,
1631
+ 0.69
1632
+ ],
1633
+ "angle": 0,
1634
+ "content": "Junsheng Zhou, Jinsheng Wang, Baorui Ma, Yu-Shen Liu, Tiejun Huang, and Xinlong Wang. Uni3d: Exploring unified 3d representation at scale. arXiv preprint arXiv:2310.06773, 2023."
1635
+ },
1636
+ {
1637
+ "type": "ref_text",
1638
+ "bbox": [
1639
+ 0.512,
1640
+ 0.696,
1641
+ 0.914,
1642
+ 0.766
1643
+ ],
1644
+ "angle": 0,
1645
+ "content": "Xiangyang Zhu, Renrui Zhang, Bowei He, Ziyu Guo, Ziyao Zeng, Zipeng Qin, Shanghang Zhang, and Peng Gao. Pointclip v2: Prompting clip and gpt for powerful 3d open-world learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2639-2650, 2023."
1646
+ },
1647
+ {
1648
+ "type": "ref_text",
1649
+ "bbox": [
1650
+ 0.512,
1651
+ 0.77,
1652
+ 0.914,
1653
+ 0.826
1654
+ ],
1655
+ "angle": 0,
1656
+ "content": "Chenming Zhu, Tai Wang, Wenwei Zhang, Kai Chen, and Xihui Liu. Scanreason: Empowering 3d visual grounding with reasoning capabilities. In European Conference on Computer Vision, pages 151-168. Springer, 2024."
1657
+ },
1658
+ {
1659
+ "type": "ref_text",
1660
+ "bbox": [
1661
+ 0.512,
1662
+ 0.831,
1663
+ 0.914,
1664
+ 0.885
1665
+ ],
1666
+ "angle": 0,
1667
+ "content": "Chenming Zhu, Tai Wang, Wenwei Zhang, Jiangmiao Pang, and Xihui Liu. Llava-3d: A simple yet effective pathway to empowering lmm with 3d-awareness. arXiv preprint arXiv:2409.18125, 2024."
1668
+ },
1669
+ {
1670
+ "type": "list",
1671
+ "bbox": [
1672
+ 0.512,
1673
+ 0.069,
1674
+ 0.915,
1675
+ 0.885
1676
+ ],
1677
+ "angle": 0,
1678
+ "content": null
1679
+ }
1680
+ ]
1681
+ ]
data/2025/2504_05xxx/2504.05786/e603c6f4-386e-4380-abf7-2f18915b0ee6_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43044cf426c1d8e135bcc319d8e69ef6ff891e01a61be4ee98744763c99eafef
3
+ size 2019424
data/2025/2504_05xxx/2504.05786/full.md ADDED
@@ -0,0 +1,247 @@
 
 
 
 
1
+ # How to Enable LLM with 3D Capacity? A Survey of Spatial Reasoning in LLM
2
+
3
+ Jirong Zha $^{1*}$ , Yuxuan Fan $^{2*}$ , Xiao Yang $^{2}$ , Chen Gao $^{1\dagger}$ , Xinlei Chen $^{1\dagger}$
4
+
5
+ $^{1}$ Tsinghua University
6
+
7
+ $^{2}$The Hong Kong University of Science and Technology (Guangzhou)
8
+
9
+ zhajirong23@mails.tsinghua.edu.cn, {yfan546, xyang856}@connect.hkust-gz.edu.cn, chgao96@gmail.com, chen.xinlei@sz.tsinghua.edu.cn
10
+
11
+ # Abstract
12
+
13
+ 3D spatial understanding is essential in real-world applications such as robotics, autonomous vehicles, virtual reality, and medical imaging. Recently, Large Language Models (LLMs), having demonstrated remarkable success across various domains, have been leveraged to enhance 3D understanding tasks, showing potential to surpass traditional computer vision methods. In this survey, we present a comprehensive review of methods integrating LLMs with 3D spatial understanding. We propose a taxonomy that categorizes existing methods into three branches: image-based methods deriving 3D understanding from 2D visual data, point cloud-based methods working directly with 3D representations, and hybrid modality-based methods combining multiple data streams. We systematically review representative methods along these categories, covering data representations, architectural modifications, and training strategies that bridge textual and 3D modalities. Finally, we discuss current limitations, including dataset scarcity and computational challenges, while highlighting promising research directions in spatial perception, multi-modal fusion, and real-world applications.
14
+
15
+ # 1 Introduction
16
+
17
+ Large Language Models (LLMs) have evolved from basic neural networks to advanced transformer models like BERT [Devlin et al., 2019] and GPT [Radford, 2018], originally excelling at language tasks by learning from vast text datasets. Recent advancements, however, have extended these models beyond pure linguistic processing to encompass multimodal abilities (in this paper, "LLMs" refers specifically to models that integrate multimodal functions). Their ability to capture complex patterns and relationships [Chen et al., 2024a] now holds promise for spatial reasoning tasks [Ma et al., 2024b]. By applying these enhanced models to challenges such as understanding 3D object relationships and spatial navigation, we open up new opportunities for advancing fields like robotics, computer vision, and augmented reality [Gao et al., 2024].
18
+
19
+ ![](images/1c4b8c1a8e39901fceb895fcf642206dfa4b227423055dcf9d75196664ca28d0.jpg)
20
+ Figure 1: Large Language Models can acquire 3D spatial reasoning capabilities through various input sources including multi-view images, RGB-D images, point clouds, and hybrid modalities, enabling the processing and understanding of three-dimensional information.
21
+
22
+ At the same time, 3D data and 3D modeling techniques have seen significant developments [Ma et al., 2024c], finding extensive applications in virtual and augmented reality, robotics, autonomous vehicles, gaming, medical imaging, and more. Unlike traditional two-dimensional images, 3D data provides a richer view of objects and environments, capturing essential spatial relationships and geometry. Such information is critical for tasks like scene reconstruction, object manipulation, and autonomous navigation, where merely text-based descriptions or 2D representations may fall short of conveying the necessary depth or spatial context.
23
+
24
+ LLMs help Spatial Understanding. Bringing these two fields together—powerful language understanding from LLMs and the spatial realism of 3D data—offers the potential for highly capable, context-aware systems. From a linguistic perspective, real-world descriptions often reference physical arrangement, orientation, or manipulations of objects in space. Text alone can be imprecise or ambiguous about size, shape, or relative positioning unless one can integrate a robust spatial or visual understanding. Consequently, there is growing interest in enhancing LLMs with a "3D capacity" that enables them to interpret, reason, and even generate three-dimensional representations based on natural language prompts. Such an integrated approach opens up exciting prospects: robots that can follow language instructions more effectively by grounding their commands in 3D context, architects who quickly prototype 3D layouts from textual descriptions, game designers who generate immersive environments for narrative-based experiences, and many other creative applications yet to be envisioned.
27
+
28
+ Motivation. Although LLMs have been increasingly applied in 3D-related tasks, and Ma et al. [2024b] provided a systematic overview of this field, the rapid advancement of this domain has led to numerous new developments in recent months, necessitating an up-to-date survey that captures these recent breakthroughs. Integrating 3D capacity into LLMs faces several key challenges: (1) the scarcity of high-quality 3D datasets compared to abundant text corpora; (2) the fundamental mismatch between sequential text data and continuous 3D spatial structures, requiring specialized architectural adaptations; and (3) the intensive computational requirements for processing 3D data at scale. While early attempts at combining language and 3D have shown promise, current approaches often remain limited in scope, scalability, and generalization capability. Most existing solutions are domain-specific and lack the broad applicability characteristic of text-based LLMs.
29
+
30
+ Contribution. The contributions of this work are summarized in the following three aspects: (1) A structured taxonomy. We provide a timely and comprehensive survey that distinguishes itself from the systematic overview offered by Ma et al. [2024b] by presenting a novel perspective on LLM applications in 3D-related tasks: our work constructs a structured taxonomy that categorizes existing research into three primary groups (Figure 2) and offers a forward-looking analysis of the latest breakthroughs, thereby underscoring our unique contributions and the significance of our approach in advancing the field. (2) A comprehensive review. Building on the proposed taxonomy, we systematically review the current research progress on LLMs for spatial reasoning tasks. (3) Future directions. We highlight the remaining limitations of existing works and suggest potential directions for future research.
31
+
32
+ # 2 Preliminary
33
+
34
+ # 2.1 Large Language Models
35
+
36
+ Large Language Models (LLMs) have evolved from early word embeddings to context-aware models like BERT [Devlin et al., 2019]. Generative transformers such as the GPT series [Radford, 2018] have further advanced text generation and few-shot learning. However, these models often struggle with spatial reasoning due to their focus on textual patterns, prompting efforts to integrate external spatial knowledge [Fu et al., 2024].
37
+
38
+ Vision-Language Models (VLMs) extend LLMs by aligning visual data with text. Early examples like CLIP [Radford et al., 2021] leverage co-attentional architectures and contrastive learning, while later models such as BLIP [Li et al., 2022] refine these techniques with larger datasets. Yet, most VLMs process only 2D data, limiting their ability to capture detailed 3D spatial configurations. Integrating 3D context via depth maps, point clouds, or voxels remains challenging, motivating ongoing research toward more robust spatial intelligence.
39
+
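+ The contrastive alignment mentioned above can be made concrete with a short sketch. The snippet below implements the symmetric contrastive (InfoNCE) objective that CLIP-style models optimize over a batch of paired image-text embeddings; it is a minimal illustration rather than the code of any surveyed model, and the batch layout and `temperature` value are assumptions.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def contrastive_alignment_loss(image_emb, text_emb, temperature=0.07):
+     """Symmetric InfoNCE loss; row i of each tensor is a matched pair."""
+     # L2-normalize so dot products become cosine similarities.
+     image_emb = F.normalize(image_emb, dim=-1)
+     text_emb = F.normalize(text_emb, dim=-1)
+
+     # (batch, batch) similarity matrix; the diagonal holds positive pairs.
+     logits = image_emb @ text_emb.t() / temperature
+     targets = torch.arange(logits.size(0), device=logits.device)
+
+     # Contrast each image against all texts, and each text against all images.
+     loss_i2t = F.cross_entropy(logits, targets)
+     loss_t2i = F.cross_entropy(logits.t(), targets)
+     return (loss_i2t + loss_t2i) / 2
+ ```
+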
40
+ # 2.2 3D Data Structures
41
+
42
+ 3D data comes in different structures, which are essential for understanding the three-dimensional world; common methods include point clouds, voxel grids, polygonal meshes, neural fields, hybrid representations, and 3D Gaussian splatting.
43
+
44
+ Point clouds represent shapes using discrete points, typically denoted as
45
+
46
+ $$
47
+ P = \left\{p _ {i} \in \mathbb {R} ^ {3} \mid i = 1, \dots , N \right\},
48
+ $$
49
+
50
+ which are storage-efficient but lack surface topology. Voxel grids partition space into uniform cubes, with each voxel $V(i,j,k)$ storing occupancy or distance values, providing detailed structure at the expense of increased memory usage at higher resolutions. Polygonal meshes compactly encode complex geometries through a set of vertices $\{v_{i}\}$ and faces $\{F_j\}$ , though their unstructured and non-differentiable nature poses challenges for integration with neural networks. Neural fields offer an implicit approach by modeling 3D shapes as continuous and differentiable functions, such as
51
+
52
+ $$
53
+ f _ {\theta}: \mathbb {R} ^ {3} \rightarrow (c, \sigma),
54
+ $$
55
+
56
+ which maps spatial coordinates to color $c$ and density $\sigma$ . Hybrid representations combine these neural fields with traditional volumetric methods (e.g., integrating $f_{\theta}$ with voxel grids) to achieve high-quality, real-time rendering. Meanwhile, 3D Gaussian splatting enhances point clouds by associating each point $p_i$ with a covariance matrix $\Sigma_i$ and color $c_i$ , efficiently encoding radiance information for rendering. Each method has its unique strengths and trade-offs, making them suitable for different applications in 3D understanding and generation.
57
+
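+ As a minimal sketch of the neural-field formulation above, the following PyTorch module implements an MLP $f_{\theta}:\mathbb{R}^{3}\rightarrow(c,\sigma)$ with a sinusoidal positional encoding. The layer sizes and encoding depth are illustrative assumptions, not the configuration of any particular method.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class NeuralField(nn.Module):
+     """Minimal f_theta: R^3 -> (color c in [0,1]^3, density sigma >= 0)."""
+
+     def __init__(self, num_freqs=6, hidden=128):
+         super().__init__()
+         self.num_freqs = num_freqs
+         in_dim = 3 * 2 * num_freqs  # sin and cos per frequency per coordinate
+         self.mlp = nn.Sequential(
+             nn.Linear(in_dim, hidden), nn.ReLU(),
+             nn.Linear(hidden, hidden), nn.ReLU(),
+             nn.Linear(hidden, 4),  # 3 color channels + 1 density
+         )
+
+     def encode(self, x):
+         # Sinusoids of increasing frequency let the MLP fit fine geometry.
+         freqs = 2.0 ** torch.arange(self.num_freqs, device=x.device, dtype=x.dtype)
+         angles = x[..., None] * freqs                      # (..., 3, num_freqs)
+         return torch.cat([angles.sin(), angles.cos()], dim=-1).flatten(-2)
+
+     def forward(self, xyz):
+         out = self.mlp(self.encode(xyz))
+         color = torch.sigmoid(out[..., :3])                # c in [0, 1]^3
+         sigma = torch.relu(out[..., 3:])                   # non-negative density
+         return color, sigma
+
+ # Query the field at a batch of 3D coordinates.
+ color, sigma = NeuralField()(torch.rand(1024, 3))
+ ```
+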
58
+ # 2.3 Proposed Taxonomy
59
+
60
+ We propose a taxonomy that classifies 3D-LLM research into three main categories based on input modalities and integration strategies, as shown in Figure 1: Image-based spatial reasoning encompasses approaches that derive 3D understanding from 2D images. This includes multi-view methods that reconstruct 3D scenes, RGB-D images providing explicit depth information, monocular 3D perception inferring depth from single views, and medical imaging applications. While these approaches benefit from readily available image data and existing vision models, they may struggle with occlusions and viewpoint limitations. Point cloud-based spatial reasoning works directly with 3D point cloud data through three alignment strategies: (1) Direct alignment that immediately connects point features with language embeddings, (2) Step-by-step alignment that follows sequential stages to bridge modalities, and (3) Task-specific alignment customized for particular spatial reasoning requirements. These methods maintain geometric fidelity but face challenges in handling unstructured 3D data. Hybrid modality-based spatial reasoning combines multiple data streams through either tightly or loosely coupled architectures. Tightly coupled approaches integrate modalities through shared embeddings or end-to-end training, while loosely coupled methods maintain modular components with defined interfaces between them. This enables leveraging complementary strengths across modalities but increases architectural complexity.
61
+
62
+ This taxonomy provides a structured framework for understanding the diverse technical approaches in the field while highlighting the distinct challenges and trade-offs each branch must address.
63
+
64
+ ![](images/10f4e27138d77cef1e66632497ab60fcb460eb82533892d1e5d74ab2bb75012d.jpg)
65
+ Figure 2: A Taxonomy of Models for Spatial Reasoning with LLMs: Image-based, Point Cloud-based, and Hybrid Modality-based Approaches and Their Subdivisions.
66
+
67
+ ![](images/f869e9459876e49023165cdcf85439fb6449fd17975fe58ef81ce28fb4e6e702.jpg)
68
+ Figure 3: An overview of image-based approaches.
69
+
70
+ must address. Figure 2 presents a detailed breakdown of representative works in each category.
71
+
72
+ # 3 Recent Advances of Spatial Reasoning in LLM
73
+
74
+ # 3.1 Image-based Spatial Reasoning
75
+
76
+ Image-based spatial reasoning methods can be categorized based on their input modalities: multi-view images, monocular images, RGB-D images, and 3D medical images, as shown in Figure 3. Each modality offers unique advantages for enhancing 3D understanding in Large Language Models (LLMs). Multi-view images provide spatial data from different perspectives, monocular images extract 3D insights from a single view, RGB-D images incorporate depth information, and 3D medical images address domain-specific challenges in healthcare. These categories highlight the strengths and challenges of each approach in improving spatial reasoning capabilities.
77
+
78
+ # 3.1.1 Multi-view Images as input
79
+
80
+ Several studies explore multi-view images to enhance LLMs' spatial understanding. LLaVA-3D Zhu et al. [2024b] leverages multi-view images and 3D positional embeddings to create 3D Patches, achieving state-of-the-art 3D spatial understanding while maintaining 2D image understanding capabilities. Agent3D-Zero Zhang et al. [2024] utilizes multiple images from different viewpoints, enabling VLMs to perform robust reasoning about spatial relationships and achieve zero-shot scene understanding. ShapeLLM Qi et al. [2024a] also uses multi-view image input and is robust to occlusions. Scene-LLM Fu et al. [2024] uses multi-view images to build 3D feature representations, incorporating scene-level and egocentric 3D information to support interactive planning. SpatialPIN Ma et al. [2024a] enhances VLMs' spatial reasoning by decomposing, understanding, and reconstructing explicit 3D representations from multi-view images, and generalizes to various 3D tasks. LLMI3D Yang et al. [2024] extracts spatially enhanced local features from high-resolution images using CNNs and a depth predictor, and uses a ViT to obtain tokens from low-resolution images; it employs a spatially enhanced cross-branch attention mechanism to mine spatial local features of objects and uses geometric projection to handle variations in camera focal length. However, extracting multi-view features incurs substantial computational overhead and can neglect essential geometry and depth information, and plain-text descriptions often lead to ambiguities, especially in cluttered and complex 3D environments Chen et al. [2024c]. ConceptGraphs Gu et al. [2024] proposes an open-vocabulary, graph-structured representation for 3D scenes, built by running 2D foundation models and fusing their outputs into 3D through multi-view association.
81
+
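+ Although implementations differ, a shared idea in several of these systems, turning multi-view 2D patch features into 3D-aware tokens by attaching 3D positional embeddings, can be sketched as follows. The sinusoidal_pe helper and all shapes are illustrative assumptions in the spirit of LLaVA-3D's 3D Patches, not the published implementation, and the back-projected patch centers are assumed to be precomputed from depth and camera poses.
+
+ ```python
+ import torch
+
+ def sinusoidal_pe(xyz, dim):
+     """Hypothetical 3D positional embedding: sin/cos bands per coordinate axis."""
+     assert dim % 6 == 0, "needs sin+cos for each of the 3 axes"
+     freqs = 2.0 ** torch.arange(dim // 6, dtype=torch.float32)
+     angles = xyz.unsqueeze(-1) * freqs               # (N, 3, dim // 6)
+     return torch.cat([angles.sin(), angles.cos()], dim=-1).flatten(1)  # (N, dim)
+
+ def lift_multiview_patches(feats, patch_xyz):
+     """Attach 3D position to 2D patch features from every view.
+
+     feats:     (V, N, C) patch features from V views (e.g., from a ViT).
+     patch_xyz: (V, N, 3) back-projected 3D centers of those patches,
+                assumed given from depth and camera poses.
+     """
+     V, N, C = feats.shape
+     pe = sinusoidal_pe(patch_xyz.reshape(-1, 3), C).reshape(V, N, C)
+     return (feats + pe).reshape(V * N, C)            # one pool of 3D-aware tokens
+
+ tokens = lift_multiview_patches(torch.randn(4, 196, 66), torch.rand(4, 196, 3))
+ print(tokens.shape)  # torch.Size([784, 66]): tokens an LLM can attend over
+ ```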
82
+ # 3.1.2 Monocular Image as input
83
+
84
+ LLMI3D Yang et al. [2024] uses a single 2D image for 3D perception, enhancing performance through spatial local feature mining, 3D query token decoding, and geometry-based 3D reasoning. It uses a depth predictor and a CNN to extract spatial local features, and learnable 3D query tokens for geometric coordinate regression. It combines black-box networks with white-box projection to address changes in camera focal length.
85
+
86
+
87
+
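+ The "white-box" geometric step can be illustrated with the standard pinhole camera model: given a predicted 2D location, a predicted depth, and the camera intrinsics, the 3D position follows in closed form, which is why focal length must be handled explicitly. A minimal sketch under these assumptions, not LLMI3D's actual code:
+
+ ```python
+ import numpy as np
+
+ def backproject(u, v, depth, fx, fy, cx, cy):
+     """Pinhole back-projection of pixel (u, v) with known depth to camera coordinates."""
+     x = (u - cx) / fx * depth
+     y = (v - cy) / fy * depth
+     return np.array([x, y, depth])
+
+ # The same pixel and depth under two focal lengths yields different 3D points,
+ # so the focal length must enter the projection explicitly.
+ print(backproject(700, 400, 12.0, fx=1000, fy=1000, cx=640, cy=360))
+ print(backproject(700, 400, 12.0, fx=1400, fy=1400, cx=640, cy=360))
+ ```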
88
+ # 3.1.3 RGB-D Image as Input
89
+
90
+ In SpatialPIN Ma et al. [2024a], depth is estimated with ZoeDepth while the field of view (FOV) is recovered through perspective fields, and the resulting geometry is used for 3D scene understanding and reconstruction. M3D-LaMed Bai et al. [2024] pre-trains a 3D medical vision encoder on medical image slices along the depth axis and introduces end-to-end tuning to integrate 3D information into the LLM.
91
+
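+ A rough sketch of this pipeline stage: an estimated field of view yields pinhole intrinsics, which turn a dense depth map into a point cloud. The helper names are illustrative, square pixels and a centered principal point are assumed, and this is not the SpatialPIN code.
+
+ ```python
+ import numpy as np
+
+ def intrinsics_from_fov(width, height, fov_x_deg):
+     """Recover pinhole intrinsics from an estimated horizontal field of view."""
+     fx = width / (2.0 * np.tan(np.radians(fov_x_deg) / 2.0))
+     return fx, fx, width / 2.0, height / 2.0  # square pixels, centered principal point
+
+ def depth_to_pointcloud(depth, fx, fy, cx, cy):
+     """Back-project an (H, W) depth map into an (H*W, 3) point cloud."""
+     v, u = np.mgrid[0:depth.shape[0], 0:depth.shape[1]]
+     x = (u - cx) / fx * depth
+     y = (v - cy) / fy * depth
+     return np.stack([x, y, depth], axis=-1).reshape(-1, 3)
+
+ fx, fy, cx, cy = intrinsics_from_fov(640, 480, fov_x_deg=60.0)
+ cloud = depth_to_pointcloud(np.random.rand(480, 640) * 5.0, fx, fy, cx, cy)
+ print(cloud.shape)  # (307200, 3)
+ ```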
92
+ # 3.1.4 3D Medical Image as input
93
+
94
+ Unlike previous research focused on 2D medical images combined with other modalities such as textual descriptions, M3D-LaMed Bai et al. [2024] is specifically designed for 3D CT images and analyzes their spatial features. It demonstrates excellent performance across multiple tasks, including image-text retrieval, report generation, visual question answering, localization, and segmentation. To generate radiology reports automatically, a new framework Liu et al. [2024a] is proposed that employs low-resolution (LR) visual tokens as queries to extract information from high-resolution (HR) tokens, ensuring that detailed information is retained across HR volumes while minimizing computational costs by processing only the HR-informed LR visual queries. 3D-CT-GPT Chen et al. [2024b], a medical vision-language model, is tailored for generating radiology reports from 3D CT scans, with a focus on chest CTs. OpenMEDLab Wang et al. [2024] assembles and publishes a variety of medical foundation models for processing multi-modal medical data, including Color Fundus Photography (CFP), Optical Coherence Tomography (OCT), endoscopy videos, CT & MR volumes, and pathology images.
95
+
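+ The efficiency idea behind the LR-query design can be sketched with one cross-attention layer, where a small set of low-resolution tokens queries the much larger set of high-resolution volume tokens. Dimensions and layer choices below are illustrative assumptions, not the paper's architecture.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ dim = 256
+ hr_tokens = torch.randn(1, 8192, dim)  # many tokens from a high-resolution CT volume
+ lr_tokens = torch.randn(1, 256, dim)   # few tokens from a low-resolution view
+
+ # LR tokens act as queries, so attention cost scales with 256 x 8192 instead of
+ # 8192 x 8192 for full self-attention over the HR volume.
+ attn = nn.MultiheadAttention(embed_dim=dim, num_heads=8, batch_first=True)
+ hr_informed_lr, _ = attn(query=lr_tokens, key=hr_tokens, value=hr_tokens)
+ print(hr_informed_lr.shape)  # torch.Size([1, 256, 256]): compact, HR-informed queries
+ ```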
96
+ # 3.1.5 Discussion
97
+
98
+ Image-based spatial reasoning methods offer significant advantages, such as easy data acquisition and integration with pre-trained 2D models. Multi-view images provide rich spatial information, while depth estimation enhances scene understanding. However, challenges remain, including limited depth from single views, scale uncertainty, occlusion, and viewpoint dependency. These methods also face issues with visual hallucinations, generalization to novel scenes, and high computational costs. Future research should focus on improving multi-view integration and depth estimation to address these limitations.
99
+
100
+ # 3.2 Recent Advances of Point Cloud-based Spatial Reasoning
101
+
102
+ As shown in Figure 4, point cloud-based spatial reasoning has advanced significantly in recent years, employing three main alignment methods: Direct, Step-by-step, and Task-specific Alignment. These methods are essential for integrating point cloud data with language models to enable effective spatial reasoning. Direct Alignment establishes immediate connections between point cloud features and language model embeddings, while Step-by-step Alignment follows a sequential process through multiple stages. Task-specific Alignment is customized for particular spatial reasoning requirements. The choice of method depends on specific application needs and constraints.
103
+
104
+ ![](images/01a7846f1deba904180c76b95b69058c7981b82fbb85fd782f363da73b4c3476.jpg)
105
+ Figure 4: An overview of point cloud-based approaches.
106
+
107
+
108
+
109
+ # 3.2.1 Direct Alignment
110
+
111
+ Direct alignment methods create direct connections between point cloud data and language models. PointCLIP [Zhang et al., 2022] was a pioneer, projecting point clouds into multi-view depth maps and using CLIP's pre-trained visual encoder for feature extraction; the extracted features were then aligned with textual features through hand-crafted prompt templates. This approach showed promising results in zero-shot and few-shot classification tasks by transferring 2D knowledge to the 3D domain. PointCLIP V2 [Zhu et al., 2023] improved the projection quality with a realistic projection module and used GPT-3 for generating 3D-specific text descriptions, achieving better performance in zero-shot classification, part segmentation, and object detection. Chat-Scene [Huang et al., 2024] introduced object identifiers to facilitate object referencing during user-assistant interactions, representing scenes through object-centric embeddings. PointLLM [Xu et al., 2025] advanced the field by integrating a point cloud encoder with a powerful LLM, effectively fusing geometric, appearance, and linguistic information, and overcoming data scarcity with automated data generation. These methods demonstrate the potential for effective 3D point cloud understanding through language models, enabling improved spatial reasoning and human-AI interaction.
112
+
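+ Direct alignment in the PointCLIP style reduces to three steps: project the cloud to a depth map, encode it with a frozen 2D encoder, and score the result against text-prompt embeddings by cosine similarity. The sketch below uses a deliberately naive projection and stand-in encoders in place of CLIP's, so it only illustrates the data flow.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def depth_map(points, res=32):
+     """Naive orthographic projection of a point cloud in [-1, 1]^3 to a depth map."""
+     img = torch.zeros(res, res)
+     uv = ((points[:, :2] + 1) / 2 * (res - 1)).long().clamp(0, res - 1)
+     for (u, v), z in zip(uv, points[:, 2]):
+         img[v, u] = torch.maximum(img[v, u], z + 1)  # keep the dominant (max-z) surface
+     return img
+
+ def classify(points, class_names, image_encoder, text_encoder):
+     """Zero-shot scoring: cosine similarity between depth-map and prompt features."""
+     img_feat = F.normalize(image_encoder(depth_map(points)), dim=-1)
+     prompts = [f"a depth map of a {c}." for c in class_names]  # hand-crafted template
+     txt_feat = F.normalize(text_encoder(prompts), dim=-1)
+     return (img_feat @ txt_feat.T).softmax(dim=-1)
+
+ # Stand-ins for CLIP's frozen image/text encoders (illustrative only).
+ image_encoder = lambda img: img.flatten()[None, :]
+ text_encoder = lambda prompts: torch.randn(len(prompts), 32 * 32)
+
+ probs = classify(torch.rand(1024, 3) * 2 - 1, ["chair", "plane", "car"],
+                  image_encoder, text_encoder)
+ print(probs)  # (1, 3) class probabilities
+ ```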
113
+ # 3.2.2 Step-by-step Alignment
114
+
115
+ Step-by-step alignment has gained popularity in integrating point cloud features with language models. Notable approaches include GPT4Point [Qi et al., 2024b], which uses a BERT-based Point-QFormer for point-text feature alignment, followed by object generation. Grounded 3D-LLM [Chen et al., 2024d] first aligns 3D scene embeddings with textual descriptions via contrastive pre-training, then fine-tunes with referent tokens. LiDAR-LLM [Yang et al., 2023] employs a three-stage process: cross-modal alignment, object-centric learning, and high-level instruction fine-tuning. MiniGPT-3D [Tang et al., 2024a] follows a four-stage strategy, from point cloud projection to advanced model enhancements using a Mixture of Query Experts. GreenPLM [Tang et al., 2024b] uses a three-stage method that aligns a text encoder with an LLM using large text corpora, followed by point-LLM alignment with 3D data. These step-by-step approaches highlight the gradual improvement of spatial reasoning in 3D contexts, offering valuable insights for future research.
116
+
117
+ ![](images/6f283c8ccff38019b629c7d3baf89d8f01eae9a6757bd3db846940b8dbae1d64.jpg)
118
+ Figure 5: An overview of hybrid modality-based approaches.
119
+
120
+
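+ The staged recipes described above differ in detail, but most share one scaffolding: freeze everything, then let each stage unfreeze a different subset of modules. A generic sketch follows; the stage names and the module split are illustrative, not any single paper's schedule.
+
+ ```python
+ import torch.nn as nn
+
+ class PointLLMPipeline(nn.Module):
+     """Generic point encoder -> projector -> LLM stack (all modules are placeholders)."""
+     def __init__(self):
+         super().__init__()
+         self.point_encoder = nn.Linear(3, 256)
+         self.projector = nn.Linear(256, 512)   # the cross-modal "bridge"
+         self.llm = nn.Linear(512, 512)         # stand-in for a (mostly frozen) LLM
+
+ def set_trainable(module, flag):
+     for p in module.parameters():
+         p.requires_grad = flag
+
+ model = PointLLMPipeline()
+ stages = [  # (stage name, modules unfrozen in that stage)
+     ("1: point-text feature alignment", [model.projector]),
+     ("2: encoder refinement",           [model.point_encoder, model.projector]),
+     ("3: instruction fine-tuning",      [model.projector, model.llm]),
+ ]
+ for name, trainable in stages:
+     set_trainable(model, False)
+     for m in trainable:
+         set_trainable(m, True)
+     # ... run this stage's training loop here ...
+     n = sum(p.numel() for p in model.parameters() if p.requires_grad)
+     print(f"stage {name}: {n} trainable parameters")
+ ```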
121
+
122
+ # 3.2.3 Task-specific Alignment
123
+
124
+ Task-specific alignment customizes models for specific spatial reasoning tasks to improve performance and generalization. SceneVerse [Jia et al., 2024] introduces a large 3D vision-language dataset and Grounded Pre-training for Scenes (GPS), using multi-level contrastive alignment for unified scene-text alignment, achieving state-of-the-art results in tasks like 3D visual grounding and question answering. LL3DA [Chen et al., 2024c] presents a dialogue system that integrates textual instructions and visual interactions, excelling in complex 3D environments. Chat-3D [Wang et al., 2023] proposes a three-stage training scheme to align 3D scene representations with language models, capturing spatial relations with limited data. VisProg [Yuan et al., 2024] introduces visual programming for zero-shot open-vocabulary 3D grounding, leveraging LLMs to generate and execute programmatic representations. These task-specific approaches highlight the importance of adapting models to complex spatial relationships, enabling robust performance even with limited data or zero-shot tasks.
125
+
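+ The visual-programming idea can be made concrete with a toy interpreter: the LLM emits a short program over scene primitives, and the system executes it against detected objects. The ops, the scene dictionary, and the example program below are hypothetical stand-ins for the kinds of primitives such systems expose.
+
+ ```python
+ import numpy as np
+
+ # Toy scene: object labels with 3D centroids (in practice from a 3D detector).
+ scene = {"chair": [np.array([1.0, 0.0, 0.0]), np.array([4.0, 1.0, 0.0])],
+          "table": [np.array([3.5, 1.0, 0.0])]}
+
+ # Hypothetical primitives the LLM is allowed to compose.
+ OPS = {
+     "LOC": lambda cls: scene[cls],
+     "NEAREST": lambda objs, anchors: min(
+         objs, key=lambda o: min(np.linalg.norm(o - a) for a in anchors)),
+ }
+
+ # A program an LLM might emit for "the chair next to the table".
+ program = [("chairs", "LOC", ["chair"]),
+            ("tables", "LOC", ["table"]),
+            ("answer", "NEAREST", ["chairs", "tables"])]
+
+ env = {}
+ for out, op, args in program:
+     vals = [env.get(a, a) for a in args]  # resolve variables, pass literals through
+     env[out] = OPS[op](*vals)
+ print(env["answer"])  # centroid of the chair closest to the table: [4. 1. 0.]
+ ```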
126
+ # 3.2.4 Discussion
127
+
128
+ The three alignment approaches—Direct, Step-by-step, and Task-specific—each have distinct strengths and challenges. Direct alignment offers efficiency and quick results but struggles with complex spatial relationships. Step-by-step alignment improves feature integration at the cost of higher computational resources and training time. Task-specific alignment excels in specialized tasks but may lack broader applicability.
129
+
130
+ # 3.3 Hybrid Modality-based Spatial Reasoning
131
+
132
+ Hybrid modality-based spatial reasoning integrates point clouds, images, and LLMs through Tightly Coupled and Loosely Coupled approaches, as shown in Figure 5. The Tightly Coupled approach fosters close integration, enabling seamless interaction and high performance, while the Loosely Coupled approach promotes modularity, allowing independent operation of components for greater scalability and flexibility at the cost of reduced real-time interaction.
133
+
134
+ # 3.3.1 Tightly Coupled
135
+
136
+ Several recent works have explored tightly integrated approaches for spatial reasoning across point clouds, images and language modalities: Point-Bind [Guo et al., 2023] proposes a joint embedding space to align point clouds with images and text through contrastive learning. It leverages ImageBind to construct unified representations that enable tasks like zero-shot classification, open-world understanding and multi-modal generation. The tight coupling allows Point-Bind to reason about point clouds using both visual and linguistic cues. JM3D [Ji et al., 2024] introduces a Structured Multimodal Organizer that tightly fuses multi-view images and hierarchical text trees with point clouds. This coupled architecture enables detailed spatial understanding by leveraging complementary information across modalities. The Joint Multi-modal Alignment further enhances the synergistic relationships between visual and linguistic features. Uni3D [Zhou et al., 2023] employs a unified transformer architecture that directly aligns point cloud features with image-text representations. By tightly coupling the modalities through end-to-end training, it achieves strong performance on tasks like zero-shot classification and open-world understanding. The shared backbone enables efficient scaling to billion-parameter models. Uni3D-LLM [Liu et al., 2024b] extends this tight coupling to LLMs through an LLM-to-Generator mapping block. This enables unified perception, generation and editing of point clouds guided by natural language. The tight integration allows leveraging rich semantic knowledge from LLMs while maintaining high-quality 3D understanding.
137
+
138
+
139
+
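+ At the core of these tightly coupled designs is a contrastive objective that pulls a point cloud's embedding toward its paired image and text embeddings in one shared space. Below is a minimal symmetric InfoNCE sketch of that idea, with random tensors standing in for real encoder outputs; it is illustrative, not any system's training code.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def info_nce(anchor, positive, temperature=0.07):
+     """Symmetric InfoNCE over a batch of paired embeddings."""
+     a = F.normalize(anchor, dim=-1)
+     p = F.normalize(positive, dim=-1)
+     logits = a @ p.T / temperature          # (B, B) similarity matrix
+     labels = torch.arange(a.size(0))        # i-th point pairs with i-th image/text
+     return (F.cross_entropy(logits, labels) +
+             F.cross_entropy(logits.T, labels)) / 2
+
+ B, d = 16, 512
+ point_emb = torch.randn(B, d, requires_grad=True)  # trainable point branch output
+ image_emb = torch.randn(B, d)                      # frozen anchor space (e.g., ImageBind)
+ text_emb = torch.randn(B, d)
+
+ loss = info_nce(point_emb, image_emb) + info_nce(point_emb, text_emb)
+ loss.backward()  # gradients flow only into the point branch
+ print(float(loss))
+ ```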
140
+ # 3.3.2 Loosely Coupled
141
+
142
+ Loosely coupled approaches maintain greater independence between different modalities while still enabling interaction through well-defined interfaces. MultiPLY [Hong et al., 2024] proposes a multisensory embodied LLM that handles multiple input modalities (visual, audio, tactile, thermal) through separate encoders. The modalities are processed independently and communicate through action tokens and state tokens. This decoupled design allows the system to process each modality with specialized encoders optimized for that data type, while enabling scalability and modularity in the system architecture. Similarly, UniPoint-LLM [Liu et al.] introduces a Multimodal Universal Token Space (MUTS) that loosely connects point clouds and images through independent encoders and a shared mapping layer. This modular design allows easy integration of new modalities and simplified training by only requiring alignment between new modalities and text, rather than pairwise alignment between all modalities. The main benefits of loosely coupled architectures include greater modularity and flexibility in system design, easier integration of new modalities, and independent scaling of different components. However, this approach may result in less optimal joint representation learning, reduced real-time interaction capabilities, and potential information loss between modalities compared to tightly coupled approaches.
143
+
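+ Structurally, the loosely coupled pattern reduces to independent per-modality encoders plus one adapter each into a shared token width, so adding a modality touches nothing that already exists. A sketch in the spirit of MUTS follows; all names, encoders, and dimensions are illustrative assumptions.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class UniversalTokenSpace(nn.Module):
+     """Independent per-modality encoders, one adapter each, one shared token width."""
+     def __init__(self, token_dim=512):
+         super().__init__()
+         self.encoders = nn.ModuleDict()
+         self.adapters = nn.ModuleDict()
+         self.token_dim = token_dim
+
+     def register(self, name, encoder, feat_dim):
+         # Adding a modality touches nothing that already exists.
+         self.encoders[name] = encoder
+         self.adapters[name] = nn.Linear(feat_dim, self.token_dim)
+
+     def forward(self, name, x):
+         return self.adapters[name](self.encoders[name](x))
+
+ muts = UniversalTokenSpace()
+ muts.register("point", nn.Linear(3, 128), 128)     # stand-in point encoder
+ muts.register("image", nn.Linear(768, 256), 256)   # stand-in image encoder
+
+ pts = muts("point", torch.randn(1, 1024, 3))   # -> (1, 1024, 512)
+ img = muts("image", torch.randn(1, 196, 768))  # -> (1, 196, 512)
+ print(pts.shape, img.shape)
+ ```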
144
+ # 3.3.3 Discussion
145
+
146
+ The choice between tightly and loosely coupled approaches presents important trade-offs in multimodal spatial reasoning systems. Tightly coupled approaches like Point-Bind and JM3D offer stronger joint representation learning and real-time interaction capabilities through end-to-end training and shared feature spaces. This makes them particularly suitable for applications requiring detailed spatial understanding and precise control. However, they can be more complex to train and scale, and adding new modalities may require significant architectural changes. In contrast, loosely coupled approaches like MultiPLY and UniPoint-LLM provide greater modularity and flexibility, making them easier to extend and maintain. They allow independent optimization of different components and simplified training procedures, but may sacrifice some performance in tasks requiring fine-grained cross-modal understanding. The optimal choice ultimately depends on specific application requirements: tightly coupled architectures may be preferred for specialized high-performance systems, while loosely coupled designs better suit general-purpose platforms prioritizing extensibility and maintainability. Future work may explore hybrid approaches that combine the benefits of both paradigms, potentially using adaptive coupling mechanisms that adjust based on task demands.
147
+
148
+ <table><tr><td></td><td>Model</td><td>Data Source</td><td>Alignment Type</td><td>Pre-training</td><td>Fine-tuning</td><td>Task</td><td>Code</td></tr><tr><td rowspan="11">Image-based</td><td>LLaVA-3D [Zhu et al., 2024b]</td><td>Multi-view Images</td><td>-</td><td>✓</td><td>✓</td><td>3D VQA, 3D Scene Understanding</td><td>code</td></tr><tr><td>Agent3D-Zero [Zhang et al., 2024]</td><td>Multi-view Images</td><td>-</td><td>✓</td><td>✗</td><td>3D VQA, 3D Semantic Segmentation</td><td>✗</td></tr><tr><td>ShapeLLM [Qi et al., 2024a]</td><td>Multi-view Images</td><td>-</td><td>✓</td><td>✓</td><td>3D Object Classification, 3D Scene Captioning</td><td>code</td></tr><tr><td>Scene-LLM [Fu et al., 2024]</td><td>Multi-view Images</td><td>-</td><td>✓</td><td>✓</td><td>3D VQA, Dense Captioning</td><td>✗</td></tr><tr><td>SpatialPIN [Ma et al., 2024a]</td><td>RGB-D Images</td><td>-</td><td>✓</td><td>✗</td><td>3D Motion Planning, Task Video Generation</td><td>✗</td></tr><tr><td>LLMI3D [Yang et al., 2024]</td><td>Monocular Images</td><td>-</td><td>✓</td><td>✓</td><td>3D Grounding, 3D VQA</td><td>✗</td></tr><tr><td>SpatialVLM [Chen et al., 2024a]</td><td>Monocular Images</td><td>-</td><td>✓</td><td>✓</td><td>Dense Reward Annotator, Spatial Data Generation</td><td>code</td></tr><tr><td>M3D-LaMed [Bai et al., 2024]</td><td>Medical Images</td><td>-</td><td>✓</td><td>✓</td><td>3D VQA, 3D VLP</td><td>code</td></tr><tr><td>HILT [Liu et al., 2024a]</td><td>Medical Images</td><td>-</td><td>✓</td><td>✓</td><td>3DHRG</td><td>✗</td></tr><tr><td>3D-CT-GPT [Chen et al., 2024b]</td><td>Medical Images</td><td>-</td><td>✓</td><td>✓</td><td>Radiology Report Generation, 3D VQA</td><td>✗</td></tr><tr><td>OpenMEDLab [Wang et al., 2024]</td><td>Medical Images</td><td>-</td><td>✓</td><td>✓</td><td>Medical Imaging</td><td>code</td></tr><tr><td rowspan="16">Point Cloud-based</td><td>PointLLM [Xu et al., 2025]</td><td>Point Cloud</td><td>Direct Alignment</td><td>✓</td><td>✓</td><td>3D Object Classification, 3D Object Captioning</td><td>code</td></tr><tr><td>Chat-Scene [Huang et al., 2024]</td><td>Point Cloud</td><td>Direct Alignment</td><td>✓</td><td>✓</td><td>3D Visual Grounding, 3D Scene Captioning</td><td>code</td></tr><tr><td>PointCLIP [Zhang et al., 2022]</td><td>Point Cloud</td><td>Direct Alignment</td><td>✓</td><td>✓</td><td>3D Point Cloud Classification</td><td>code</td></tr><tr><td>PointCLIP V2 [Zhu et al., 2023]</td><td>Point Cloud</td><td>Direct Alignment</td><td>✓</td><td>✓</td><td>3D Point Cloud Classification</td><td>code</td></tr><tr><td>GPT4Point [Qi et al., 2024b]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Object Understanding</td><td>code</td></tr><tr><td>MiniGPT-3D [Tang et al., 2024a]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Object Classification, 3D Object Captioning</td><td>code</td></tr><tr><td>GreenPLM [Tang et al., 2024b]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Object Classification</td><td>code</td></tr><tr><td>Grounded 3D-LLM [Chen et al., 2024d]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Object Detection, 3D VQA</td><td>code</td></tr><tr><td>LiDAR-LLM [Yang et al., 2023]</td><td>Point Cloud</td><td>Step-by-step Alignment</td><td>✓</td><td>✓</td><td>3D Captioning, 3D Grounding</td><td>code</td></tr><tr><td>3D-LLaVA [Deng et al., 2025]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D VQA, 3D Captioning</td><td>code</td></tr><tr><td>ScanReason [Zhu et al., 2024a]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D Reasoning Grounding</td><td>code</td></tr><tr><td>SegPoint [He et al., 2024]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D Instruction Segmentation</td><td>✗</td></tr><tr><td>Kestrel [Fei et al., 2024]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>Part-Aware Point Grounding</td><td>✗</td></tr><tr><td>SIG3D [Man et al., 2024]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>Situation Estimation</td><td>code</td></tr><tr><td>Chat-3D [Wang et al., 2023]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D VQA</td><td>code</td></tr><tr><td>LL3DA [Chen et al., 2024c]</td><td>Point Cloud</td><td>Task-specific Alignment</td><td>✓</td><td>✓</td><td>3D Dense Captioning</td><td>code</td></tr><tr><td rowspan="6">Hybrid-based</td><td>Point-Bind [Guo et al., 2023]</td><td>Point cloud, Image</td><td>Tightly Coupled</td><td>✓</td><td>✓</td><td>3D Cross-modal Retrieval, Any-to-3D Generation</td><td>code</td></tr><tr><td>JM3D [Ji et al., 2024]</td><td>Point cloud, Image</td><td>Tightly Coupled</td><td>✓</td><td>✓</td><td>Image-3D Retrieval, 3D Part Segmentation</td><td>code</td></tr><tr><td>Uni3D [Zhou et al., 2023]</td><td>Point cloud, Image</td><td>Tightly Coupled</td><td>✓</td><td>✓</td><td>Zero-shot Shape Classification</td><td>code</td></tr><tr><td>Uni3D-LLM [Liu et al., 2024b]</td><td>Point cloud, Image</td><td>Tightly Coupled</td><td>✓</td><td>✓</td><td>3D VQA</td><td>✗</td></tr><tr><td>MultiPLY [Hong et al., 2024]</td><td>Point cloud, Image</td><td>Loosely Coupled</td><td>✓</td><td>✓</td><td>Object retrieval</td><td>code</td></tr><tr><td>UniPoint-LLM [Liu et al.]</td><td>Point cloud, Image</td><td>Loosely Coupled</td><td>✓</td><td>✓</td><td>3D generation, 3D VQA</td><td>✗</td></tr></table>
149
+
150
+ Table 1: Taxonomy of Large Language Models with spatial reasoning capability. This table presents a comprehensive comparison of various 3D vision-language models categorized by their input modalities (image-based, point cloud-based, and hybrid-based), showing their data sources, alignment types, training strategies (pre-training and fine-tuning), primary tasks, and code availability. The models are organized into three main categories based on their input type: image-based models, point cloud-based models, and hybrid models that utilize both modalities.
151
+
152
+
153
+
154
+ # 4 Applications
155
+
156
+ Embodied Intelligence. A key research focus leverages LLMs to enhance robotic embodied intelligence, enabling machines to interpret natural language commands for real-world tasks. This includes robotic control, navigation, and manipulation, where LLMs parse instructions, generate action plans, and adapt to dynamic environments, for instance guiding robots to locate objects in cluttered spaces using text-based prompts.
157
+
158
+ 3D Scene Understanding. Advanced 3D scene analysis integrates multimodal data (e.g., images, point clouds, text) for tasks like open-vocabulary segmentation, semantic mapping, and spatial reasoning. Central to this is 3D visual question answering (3D-VQA), requiring models to interpret queries about object attributes, spatial relationships, or contextual roles within scenes. Context-aware systems further account for user perspectives to deliver precise responses.
159
+
160
+
161
+
162
+ Cross-Domain Applications. In healthcare, LLMs analyze volumetric medical scans (e.g., CT) for lesion detection and automated diagnostics. Autonomous driving systems utilize 3D-capable LLMs to interpret traffic scenes, aiding object detection [Zha et al., 2023, 2024] and path planning. Design-oriented applications include generating indoor layouts from textual requirements, while educational tools employ interactive 3D environments to teach spatial concepts.
163
+
164
+ # 5 Challenges and Future Directions
165
+
166
+ Table 1 summarizes the models that leverage LLMs for 3D spatial reasoning tasks according to the proposed taxonomy. Based on the above review and analysis, we believe that there is still much room for further enhancement in this field. Recent advances in integrating LLMs with three-dimensional (3D) data have demonstrated considerable promise. However, numerous challenges must still be overcome to realize robust and practical 3D-aware LLMs. Below, we summarize these obstacles and then outline potential pathways to address them, highlighting key research directions for the future.
167
+
168
+ # 5.1 Challenges
169
+
170
+ Weak Spatial Reasoning and Representation. Multimodal LLMs (MLLMs) exhibit limited acuity in 3D spatial understanding, struggling with fine-grained relationships (e.g., front/back distinctions, occluded object localization) and precise geometric outputs (distances, angles). These issues stem partly from mismatches between unstructured point clouds and sequence-based LLM architectures, where high-dimensional 3D data incur prohibitive token counts or oversimplified encodings.
171
+
172
+ Data and Evaluation Gaps. Progress in 3D-aware LLMs is hindered by the scarcity of high-quality 3D-text paired datasets. Unlike the abundant resources for 2D images and video, the 3D domain lacks standardized, richly annotated datasets crucial for training robust models. Existing benchmarks focus mainly on discriminative tasks like classification and retrieval—emphasizing category differentiation rather than generating rich, descriptive 3D scene outputs. Consequently, evaluations often rely on subjective metrics (e.g., human or GPT-based judgments) that can lack consistency. Advancing the field requires developing objective, comprehensive benchmarks that assess both open-vocabulary generation and the spatial plausibility of descriptions relative to the underlying 3D structure.
173
+
174
+ Multimodal Integration and Generalization. Fusing 3D data (e.g., point clouds) with other modalities like 2D imagery, audio, or text poses significant challenges due to their distinct structural characteristics. The conversion and alignment of high-dimensional 3D data with lower-dimensional representations can lead to a loss of intricate details, diluting the original 3D richness. Moreover, current models often struggle with open-vocabulary recognition, limiting their ability to identify or describe objects outside of their training data—especially when encountering unseen scenes or novel objects. This difficulty is further compounded by the variability of natural language, from colloquial expressions to domain-specific terminology, and by noisy inputs. Thus, more sophisticated multimodal integration techniques and generalization strategies are needed to preserve geometric fidelity while accommodating diverse, unpredictable inputs.
175
+
176
+ Complex Task Definition. While 3D-aware LLMs excel in controlled settings, they lack frameworks for nuanced language-context inference in dynamic environments. Task decomposition and scalable encoding methods are needed to balance geometric fidelity with computational tractability, particularly for interactive applications requiring real-time spatial reasoning.
177
+
178
+ # 5.2 Future Directions
179
+
180
+ Enhancing 3D Perception and Representations. Addressing spatial reasoning gaps requires richer 3D-text datasets (e.g., from robotics, gaming, autonomous driving) and model architectures that encode geometric relationships. Multi-view data and robust depth cues can improve orientation, distance, and occlusion estimation. Compact 3D tokens and refined encoding/decoding methods may bridge unstructured point clouds with sequence-based models, enabling fine-grained spatial understanding and generation.
181
+
182
+
183
+
184
+ Multi-Modal Fusion and Instruction Understanding. Tighter integration of modalities (point clouds, images, text, audio) via unified latent spaces or attention mechanisms could preserve subtle geometric and semantic details. Enhanced instruction processing—including hierarchical task decomposition, contextual interpretation, and robustness to dialects/terminology—would improve compositional reasoning in 3D environments and broaden real-world applicability. Furthermore, by leveraging these integrated representations, models can more adeptly adapt to complex instructions and novel scenarios, ultimately paving the way for more robust and versatile 3D reasoning systems.
185
+
186
+ Cross-Scene Generalization and Robust Evaluation. Open-vocabulary 3D understanding demands large-scale pretraining on diverse scenes and transfer/lifelong learning paradigms for adapting to novel objects or environments. This understanding extends beyond predefined categories to generalize to unseen objects and scenes. For instance, models need to comprehend "an old rocking chair" even if this specific type of chair never appeared in the training data.
187
+
188
+ Expanding Applications for Autonomous Systems. 3D-aware LLMs hold potential in robotics (navigation, manipulation), medical imaging (lesion detection), architectural design, and interactive education. Future systems may integrate environmental constraints, user perspectives, and object affordances for autonomous planning and decision-making in dynamic 3D contexts.
189
+
190
+ Collectively, these challenges and potential directions underscore the field's rapid evolution and its equally significant open questions. Moving forward, more robust 3D-specific data resources, better model architectures, and more refined evaluation protocols will be essential to unlock the full potential of LLMs in three-dimensional settings—and ultimately bring intelligent, multimodal understanding closer to real-world deployment.
191
+
192
+ # 6 Conclusion
193
+
194
+ The integration of LLMs with 3D data is a dynamic research area. This survey categorized 3D-LLM research into image-based, point cloud-based, and hybrid modality-based spatial reasoning. It reviewed state-of-the-art methods, their applications in multiple fields, and associated challenges. Notably, image-based methods have data-related advantages but face issues such as the lack of explicit depth information. Point cloud-based methods offer precise 3D details but encounter data-handling difficulties. Hybrid methods combine strengths yet struggle with data alignment. Applications are diverse, but challenges such as weak spatial perception, data scarcity, and evaluation problems exist. Future research should focus on enhancing 3D perception, improving multi-modal fusion, expanding generalization, developing evaluation metrics, enhancing instruction understanding, optimizing 3D representations, and exploring continuous learning. By addressing these, we can unlock the full potential of 3D-aware LLMs for real-world deployment and industry advancement.
195
+
196
+ # References
197
+
198
+ Fan Bai, Yuxin Du, Tiejun Huang, Max Q-H Meng, and Bo Zhao. M3d: Advancing 3d medical image analysis with multi-modal large language models. arXiv preprint arXiv:2404.00578, 2024.
199
+ Boyuan Chen, Zhuo Xu, Sean Kirmani, Brian Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14455-14465, 2024.
200
+ Hao Chen, Wei Zhao, Yingli Li, Tianyang Zhong, Yisong Wang, Youlan Shang, Lei Guo, Junwei Han, Tianming Liu, Jun Liu, et al. 3d-ct-gpt: Generating 3d radiology reports through integration of large vision-language models. arXiv preprint arXiv:2409.19330, 2024.
201
+ Sijin Chen, Xin Chen, Chi Zhang, Mingsheng Li, Gang Yu, Hao Fei, Hongyuan Zhu, Jiayuan Fan, and Tao Chen. Ll3da: Visual interactive instruction tuning for omni-3d understanding reasoning and planning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26428-26438, 2024.
202
+ Yilun Chen, Shuai Yang, Haifeng Huang, Tai Wang, Ruiyuan Lyu, Runsen Xu, Dahua Lin, and Jiangmiao Pang. Grounded 3d-llm with referent tokens. arXiv preprint arXiv:2405.10370, 2024.
203
+ Jiajun Deng, Tianyu He, Li Jiang, Tianyu Wang, Feras Dayoub, and Ian Reid. 3d-llava: Towards generalist 3d lmm's with omni superpoint transformer. arXiv preprint arXiv:2501.01163, 2025.
204
+ Junjie Fei, Mahmoud Ahmed, Jian Ding, Eslam Mohamed Bakr, and Mohamed Elhoseiny. Kestrel: Point grounding multimodal llm for part-aware 3d vision-language understanding. arXiv preprint arXiv:2405.18937, 2024.
205
+ Rao Fu, Jingyu Liu, Xilun Chen, Yixin Nie, and Wenhan Xiong. Scene-llm: Extending language model for 3d visual understanding and reasoning. arXiv preprint arXiv:2403.11401, 2024.
206
+ Chen Gao, Baining Zhao, Weichen Zhang, Jinzhu Mao, Jun Zhang, Zhiheng Zheng, Fanhang Man, Jianjie Fang, Zile Zhou, Jinqiang Cui, et al. Embodiedcity: A benchmark platform for embodied agent in real-world city environment. arXiv preprint arXiv:2410.09604, 2024.
207
+ Qiao Gu, Ali Kuwajerwala, Sacha Morin, Krishna Murthy Jatavallabhula, Bipasha Sen, Aditya Agarwal, Corban Rivera, William Paul, Kirsty Ellis, Rama Chellappa, et al. Conceptgraphs: Open-vocabulary 3d scene graphs for perception and planning. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 5021-5028. IEEE, 2024.
208
+ Ziyu Guo, Renrui Zhang, Xiangyang Zhu, Yiwen Tang, Xianzheng Ma, Jiaming Han, Kexin Chen, Peng Gao, Xianzhi Li, Hongsheng Li, et al. Point-bind & point-llm: Aligning point cloud with multi-modality for 3d understanding, generation, and instruction following. arXiv preprint arXiv:2309.00615, 2023.
209
+
210
+ Shuting He, Henghui Ding, Xudong Jiang, and Bihan Wen. Segpoint: Segment any point cloud via large language model. In European Conference on Computer Vision, pages 349-367. Springer, 2024.
211
+ Yining Hong, Haoyu Zhen, Peihao Chen, Shuhong Zheng, Yilun Du, Zhenfang Chen, and Chuang Gan. 3d-llm: Injecting the 3d world into large language models. Advances in Neural Information Processing Systems, 36:20482-20494, 2023.
212
+ Yining Hong, Zishuo Zheng, Peihao Chen, Yian Wang, Junyan Li, and Chuang Gan. Multiply: A multisensory object-centric embodied large language model in 3d world. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26406-26416, 2024.
213
+ Haifeng Huang, Yilun Chen, Zehan Wang, Rongjie Huang, Runsen Xu, Tai Wang, Luping Liu, Xize Cheng, Yang Zhao, Jiangmiao Pang, et al. Chat-scene: Bridging 3d scene and large language models with object identifiers. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024.
214
+ Jiayi Ji, Haowei Wang, Changli Wu, Yiwei Ma, Xiaoshuai Sun, and Rongrong Ji. Jm3d & jm3d-llm: Elevating 3d representation with joint multi-modal cues. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024.
215
+ Baoxiong Jia, Yixin Chen, Huangyue Yu, Yan Wang, Xuesong Niu, Tengyu Liu, Qing Li, and Siyuan Huang. Sceneverse: Scaling 3d vision-language learning for grounded scene understanding. In European Conference on Computer Vision, pages 289-310. Springer, 2024.
216
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2019.
217
+ Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International conference on machine learning, pages 12888-12900. PMLR, 2022.
218
+ Dingning Liu, Xiaoshui Huang, Zhihui Wang, Zhenfei Yin, Peng Gao, Yujiao Wu, Yuenan Hou, Xinzhu Ma, and Wanli Ouyang. Pointmllm: Aligning multi-modality with llm for point cloud understanding, generation and editing.
219
+ Che Liu, Zhongwei Wan, Yuqi Wang, Hui Shen, Haozhe Wang, Kangyu Zheng, Mi Zhang, and Rossella Arcucci. Benchmarking and boosting radiology report generation for 3d high-resolution medical images. arXiv preprint arXiv:2406.07146, 2024.
220
+ Dingning Liu, Xiaoshui Huang, Yuenan Hou, Zhihui Wang, Zhenfei Yin, Yongshun Gong, Peng Gao, and Wanli Ouyang. Uni3d-llm: Unifying point cloud perception, generation and editing with large language models. arXiv preprint arXiv:2402.03327, 2024.
221
+ Chenyang Ma, Kai Lu, Ta-Ying Cheng, Niki Trigoni, and Andrew Markham. Spatialpin: Enhancing spatial reasoning capabilities of vision-language models through prompting and interacting 3d priors. arXiv preprint arXiv:2403.13438, 2024.
222
+
223
+
224
+ Xianzheng Ma, Yash Bhalgat, Brandon Smart, Shuai Chen, Xinghui Li, Jian Ding, Jindong Gu, Dave Zhenyu Chen, Songyou Peng, Jia-Wang Bian, et al. When llms step into the 3d world: A survey and meta-analysis of 3d tasks via multi-modal large language models. arXiv preprint arXiv:2405.10255, 2024.
225
+ Yuexin Ma, Tai Wang, Xuyang Bai, Huitong Yang, Yuenan Hou, Yaming Wang, Yu Qiao, Ruigang Yang, and Xinge Zhu. Vision-centric bev perception: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024.
226
+ Yunze Man, Liang-Yan Gui, and Yu-Xiong Wang. Situational awareness matters in 3d vision language reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13678-13688, 2024.
227
+ Zekun Qi, Runpei Dong, Shaochen Zhang, Haoran Geng, Chunrui Han, Zheng Ge, Li Yi, and Kaisheng Ma. Shapellm: Universal 3d object understanding for embodied interaction. In European Conference on Computer Vision, pages 214-238. Springer, 2024.
228
+ Zhangyang Qi, Ye Fang, Zeyi Sun, Xiaoyang Wu, Tong Wu, Jiaqi Wang, Dahua Lin, and Hengshuang Zhao. Gpt4point: A unified framework for point-language understanding and generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26417-26427, 2024.
229
+ Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021.
230
+ Alec Radford. Improving language understanding by generative pre-training. 2018.
231
+ Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Yixue Hao, Long Hu, and Min Chen. Minigpt-3d: Efficiently aligning 3d point clouds with large language models using 2d priors. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 6617-6626, 2024.
232
+ Yuan Tang, Xu Han, Xianzhi Li, Qiao Yu, Jinfeng Xu, Yixue Hao, Long Hu, and Min Chen. More text, less point: Towards 3d data-efficient point-language understanding. arXiv preprint arXiv:2408.15966, 2024.
233
+ Zehan Wang, Haifeng Huang, Yang Zhao, Ziang Zhang, and Zhou Zhao. Chat-3d: Data-efficiently tuning large language model for universal dialogue of 3d scenes. arXiv preprint arXiv:2308.08769, 2023.
234
+ Xiaosong Wang, Xiaofan Zhang, Guotai Wang, Junjun He, Zhongyu Li, Wentao Zhu, Yi Guo, Qi Dou, Xiaoxiao Li, Dequan Wang, et al. Openmedlab: An open-source platform for multi-modality foundation models in medicine. arXiv preprint arXiv:2402.18028, 2024.
235
+
236
+ Runsen Xu, Xiaolong Wang, Tai Wang, Yilun Chen, Jiangmiao Pang, and Dahua Lin. Pointllm: Empowering large language models to understand point clouds. In European Conference on Computer Vision, pages 131-147. Springer, 2025.
237
+ Senqiao Yang, Jiaming Liu, Ray Zhang, Mingjie Pan, Zoey Guo, Xiaqi Li, Zehui Chen, Peng Gao, Yandong Guo, and Shanghang Zhang. Lidar-llm: Exploring the potential of large language models for 3d lidar understanding. arXiv preprint arXiv:2312.14074, 2023.
238
+ Fan Yang, Sicheng Zhao, Yanhao Zhang, Haoxiang Chen, Hui Chen, Wenbo Tang, Haonan Lu, Pengfei Xu, Zhenyu Yang, Jungong Han, et al. Llmi3d: Empowering llm with 3d perception from a single 2d image. arXiv preprint arXiv:2408.07422, 2024.
239
+ Zhihao Yuan, Jinke Ren, Chun-Mei Feng, Hengshuang Zhao, Shuguang Cui, and Zhen Li. Visual programming for zero-shot open-vocabulary 3d visual grounding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20623-20633, 2024.
240
+ Jirong Zha, Liang Han, Xiwang Dong, and Zhang Ren. Privacy-preserving push-sum distributed cubature information filter for nonlinear target tracking with switching directed topologies. ISA transactions, 136:16-30, 2023.
241
+ Jirong Zha, Nan Zhou, Zhenyu Liu, Tao Sun, and Xinlei Chen. Diffusion-based filter for fast and accurate collaborative tracking with low data transmission. Authorea Preprints, 2024.
242
+ Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8552-8562, 2022.
243
+ Sha Zhang, Di Huang, Jiajun Deng, Shixiang Tang, Wanli Ouyang, Tong He, and Yanyong Zhang. Agent3d-zero: An agent for zero-shot 3d understanding. In European Conference on Computer Vision, pages 186-202. Springer, 2024.
244
+ Junsheng Zhou, Jinsheng Wang, Baorui Ma, Yu-Shen Liu, Tiejun Huang, and Xinlong Wang. Uni3d: Exploring unified 3d representation at scale. arXiv preprint arXiv:2310.06773, 2023.
245
+ Xiangyang Zhu, Renrui Zhang, Bowei He, Ziyu Guo, Ziyao Zeng, Zipeng Qin, Shanghang Zhang, and Peng Gao. Pointclip v2: Prompting clip and gpt for powerful 3d open-world learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2639-2650, 2023.
246
+ Chenming Zhu, Tai Wang, Wenwei Zhang, Kai Chen, and Xihui Liu. Scanreason: Empowering 3d visual grounding with reasoning capabilities. In European Conference on Computer Vision, pages 151-168. Springer, 2024.
247
+ Chenming Zhu, Tai Wang, Wenwei Zhang, Jiangmiao Pang, and Xihui Liu. Llava-3d: A simple yet effective pathway to empowering lmm with 3d-awareness. arXiv preprint arXiv:2409.18125, 2024.
data/2025/2504_05xxx/2504.05786/images/01a7846f1deba904180c76b95b69058c7981b82fbb85fd782f363da73b4c3476.jpg ADDED

Git LFS Details

  • SHA256: e93860ae5b5285bcde1b129f2fc4a7eeccd2eafe14e53d2218a60ae2bdc9c363
  • Pointer size: 130 Bytes
  • Size of remote file: 54.3 kB
data/2025/2504_05xxx/2504.05786/images/10f4e27138d77cef1e66632497ab60fcb460eb82533892d1e5d74ab2bb75012d.jpg ADDED

Git LFS Details

  • SHA256: fafdce046b07db2130a4d77ad192c867c1c8bc5c560533412ad785cbcc0b0bda
  • Pointer size: 131 Bytes
  • Size of remote file: 156 kB
data/2025/2504_05xxx/2504.05786/images/1bf7f57265e65d413e42b9341fc93ceead1023f71cc6b80ce679fd74fcaec139.jpg ADDED

Git LFS Details

  • SHA256: 7faa58b8e9f594ff92b3615e5d9b257b1a8bd60c0c5d7b29f2e1c8685be56d90
  • Pointer size: 129 Bytes
  • Size of remote file: 3.57 kB
data/2025/2504_05xxx/2504.05786/images/1c4b8c1a8e39901fceb895fcf642206dfa4b227423055dcf9d75196664ca28d0.jpg ADDED

Git LFS Details

  • SHA256: ae23458659e5dcebfb13e5dae6e5018570a29ee651e295cfd240d3e105481a81
  • Pointer size: 130 Bytes
  • Size of remote file: 36.3 kB
data/2025/2504_05xxx/2504.05786/images/6f283c8ccff38019b629c7d3baf89d8f01eae9a6757bd3db846940b8dbae1d64.jpg ADDED

Git LFS Details

  • SHA256: a00e372f4b4b6d00c90e78867e1efcad5798514593ae0a191b7927f8014d815f
  • Pointer size: 130 Bytes
  • Size of remote file: 43.3 kB
data/2025/2504_05xxx/2504.05786/images/d940e65bdb4924227a21274aee6d75d1ec4fbdde1933224aff9d92e6f49a75fb.jpg ADDED

Git LFS Details

  • SHA256: bd38a107ee38656537461d7db0d98b1c3d4dabe27d5eb4cca474ca09472f5d25
  • Pointer size: 131 Bytes
  • Size of remote file: 235 kB
data/2025/2504_05xxx/2504.05786/images/e0e4442a4b46313dc44cf0f5c9bff8b0f952923527a0c8287f8b90fc3e5eaf75.jpg ADDED

Git LFS Details

  • SHA256: afae7feeaf22865158d13633ae99dc1da58536637487f848012405bba649167b
  • Pointer size: 129 Bytes
  • Size of remote file: 2.43 kB
data/2025/2504_05xxx/2504.05786/images/f869e9459876e49023165cdcf85439fb6449fd17975fe58ef81ce28fb4e6e702.jpg ADDED

Git LFS Details

  • SHA256: 45aafc26c117d72769965f336d7dde1eb862d3fe7c5ce511c80adc9e733cefa1
  • Pointer size: 130 Bytes
  • Size of remote file: 58.5 kB
data/2025/2504_05xxx/2504.05786/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_content_list.json ADDED
@@ -0,0 +1,1238 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Pinching-Antenna Assisted ISAC: A CRLB Perspective",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 124,
8
+ 41,
9
+ 872,
10
+ 69
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Zhiguo Ding, Fellow, IEEE",
17
+ "bbox": [
18
+ 390,
19
+ 79,
20
+ 599,
21
+ 95
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Abstract—Recently, pinching antennas have attracted significant research interest due to their capability to reconfigure wireless channels as well as their array configuration flexibility. This letter focuses on how these features can be used to support integrated sensing and communications (ISAC) from the Cramér-Rao lower bound (CRLB) perspective. In particular, the CRLB achieved by pinching antennas is first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrate that using pinching antennas can significantly reduce CRLB and, hence, enhance positioning accuracy. In addition, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning.",
28
+ "bbox": [
29
+ 73,
30
+ 114,
31
+ 491,
32
+ 281
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Index Terms—Pinching antennas, integrated sensing and communications (ISAC), Cramér-Rao lower bound (CRLB), estimation theory.",
39
+ "bbox": [
40
+ 73,
41
+ 282,
42
+ 491,
43
+ 321
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "I. INTRODUCTION",
50
+ "text_level": 1,
51
+ "bbox": [
52
+ 215,
53
+ 325,
54
+ 349,
55
+ 339
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Recently, pinching antennas have received significant attention from both academia and industry as a novel evolution of smart antennas, and offer three distinguished features [1], [2]. One is their capability to create strong line-of-sight (LoS) links between the transceivers, which means that large-scale path losses and LoS blockage can be effectively mitigated by activating antennas close to users [3]. The second feature is the reconfigurability of pinching-antenna systems, where the topology of a pinching-antenna array, e.g., the locations and the number of pinching antennas, can be flexibly adjusted. The third feature is their practicality, where DOCOMO's prototype shows that pinching antennas can be straightforwardly implemented in a low-cost manner [1].",
62
+ "bbox": [
63
+ 73,
64
+ 345,
65
+ 491,
66
+ 541
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "In the literature, there already exists a large amount of work to demonstrate that the use of pinching antennas can significantly enhance the communication functionality of wireless networks. For example, the fundamental issues of pinching antennas, such as antenna activation, the architecture of a pinching-antenna array, and the array gains, have been investigated in [4]–[6]. Antenna placement is key to realizing the full potential of pinching-antenna systems, where various designs and their impact on the system throughput have been investigated in [7], [8]. Channel estimation and beam training are crucial issues to pinching-antenna systems, and sophisticated designs using the flexibility features of pinching antennas have been developed in [9], [10]. For many resource allocation problems encountered in pinching-antenna systems, the use of conventional convex optimization leads to high computational complexity, which motivates the application of advanced learning methods [11], [12]. The applications of pinching antennas to improve the uplink throughput and the security of communication networks have also been recently investigated in [13], [14].",
73
+ "bbox": [
74
+ 73,
75
+ 542,
76
+ 491,
77
+ 844
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "However, we note that the impact of pinching antennas on the sensing functionality of wireless networks has not yet been fully characterized in the literature, although the recent work in [15] demonstrated the importance of pinching",
84
+ "bbox": [
85
+ 73,
86
+ 845,
87
+ 491,
88
+ 906
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "Z. Ding is with the University of Manchester, Manchester, M1 9BB, UK, and Khalifa University, Abu Dhabi, UAE.",
95
+ "bbox": [
96
+ 73,
97
+ 907,
98
+ 491,
99
+ 931
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "text",
105
+ "text": "antennas in integrated sensing and communication (ISAC) systems [16], which motivates this letter. In particular, in this letter, the Cramér-Rao lower bound (CRLB) is used as the performance metric to characterize the capability of pinching antennas for enhancing the positioning accuracy of ISAC networks. The CRLB achieved by pinching antennas is first derived in the letter, and then compared to conventional antennas. The presented analytical results reveal that the use of pinching antennas can ensure that users at different locations experience uniform positioning accuracy, whereas the use of conventional antennas can result in a significant disparity in accuracy among the users. In addition, the important properties of CRLB achieved by pinching antennas, such as the effects of antenna placement and the local maximums of CRLB, are also investigated. Furthermore, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning.",
106
+ "bbox": [
107
+ 501,
108
+ 114,
109
+ 924,
110
+ 372
111
+ ],
112
+ "page_idx": 0
113
+ },
114
+ {
115
+ "type": "text",
116
+ "text": "II. SYSTEM MODEL",
117
+ "text_level": 1,
118
+ "bbox": [
119
+ 640,
120
+ 388,
121
+ 785,
122
+ 402
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "Consider a pinching-antenna system that is deployed to provide ISAC services to $M$ single-antenna users, denoted by $\\mathrm{U}_m$ . Given the fact that there is already a rich literature on using pinching antennas to enhance communications, and also due to space limitations, the impact of pinching antennas on the sensing functionality is focused on in this letter. Without loss of generality, assume that $N$ pinching antennas are activated on $N_{\\mathrm{WG}}$ waveguides. The location of the $n$ -th pinching antenna is denoted by $\\psi_n^{\\mathrm{Pin}} = (x_n^{\\mathrm{Pin}}, y_n^{\\mathrm{Pin}}, d_{\\mathrm{H}})$ , where $d_{\\mathrm{H}}$ denotes the height of the waveguides.",
129
+ "bbox": [
130
+ 501,
131
+ 412,
132
+ 921,
133
+ 564
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "text",
139
+ "text": "The service area is denoted by $\\mathcal{A}$ and is assumed to be a rectangle with its two sides denoted by $D_{\\mathrm{W}}$ and $D_{\\mathrm{L}}$ , respectively, and its center located at $(0,0,0)$ . The users are assumed to be uniformly distributed in $\\mathcal{A}$ , and $\\mathrm{U}_m$ 's location is denoted by $\\psi_m = (x_m,y_m,0)$ .",
140
+ "bbox": [
141
+ 503,
142
+ 565,
143
+ 921,
144
+ 641
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "text",
150
+ "text": "Denote the distance from the $n$ -th pinching antenna to the $m$ -th user by $d_{mn}$ . Distance (range) estimates for the $m$ -th user can be modeled as follows: [17]",
151
+ "bbox": [
152
+ 503,
153
+ 641,
154
+ 921,
155
+ 686
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "equation",
161
+ "text": "\n$$\n\\hat {d} _ {m n} = d _ {m n} + w _ {m n}, \\tag {1}\n$$\n",
162
+ "text_format": "latex",
163
+ "bbox": [
164
+ 640,
165
+ 696,
166
+ 919,
167
+ 714
168
+ ],
169
+ "page_idx": 0
170
+ },
171
+ {
172
+ "type": "text",
173
+ "text": "where $d_{mn} = \\sqrt{(x_m - x_n^{\\mathrm{Pin}})^2 + (y_m - y_n^{\\mathrm{Pin}})^2 + d_{\\mathrm{H}}^2}$ , and $w_{mn}$ is a zero-mean Gaussian distributed noise term whose variance is distance-dependent, i.e.,",
174
+ "bbox": [
175
+ 503,
176
+ 727,
177
+ 921,
178
+ 779
179
+ ],
180
+ "page_idx": 0
181
+ },
182
+ {
183
+ "type": "equation",
184
+ "text": "\n$$\n\\sigma_ {m n} ^ {2} = K _ {E} \\left(\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right), \\tag {2}\n$$\n",
185
+ "text_format": "latex",
186
+ "bbox": [
187
+ 524,
188
+ 787,
189
+ 919,
190
+ 811
191
+ ],
192
+ "page_idx": 0
193
+ },
194
+ {
195
+ "type": "text",
196
+ "text": "$K_{E}$ denotes a system parameter decided by the range estimation environment.",
197
+ "bbox": [
198
+ 503,
199
+ 820,
200
+ 919,
201
+ 849
202
+ ],
203
+ "page_idx": 0
204
+ },
205
+ {
206
+ "type": "text",
207
+ "text": "III. IMPACT OF PINCHING ANTENNAS ON POSITIONING",
208
+ "text_level": 1,
209
+ "bbox": [
210
+ 514,
211
+ 871,
212
+ 906,
213
+ 885
214
+ ],
215
+ "page_idx": 0
216
+ },
217
+ {
218
+ "type": "text",
219
+ "text": "A. CRLB Achieved by Pinching-Antenna Systems",
220
+ "text_level": 1,
221
+ "bbox": [
222
+ 503,
223
+ 891,
224
+ 841,
225
+ 907
226
+ ],
227
+ "page_idx": 0
228
+ },
229
+ {
230
+ "type": "text",
231
+ "text": "Without loss of generality, the impact of pinching antennas on $\\mathrm{U}_m$ 's localization is focused on. The joint probability den",
232
+ "bbox": [
233
+ 503,
234
+ 914,
235
+ 921,
236
+ 945
237
+ ],
238
+ "page_idx": 0
239
+ },
240
+ {
241
+ "type": "page_number",
242
+ "text": "1",
243
+ "bbox": [
244
+ 911,
245
+ 30,
246
+ 919,
247
+ 40
248
+ ],
249
+ "page_idx": 0
250
+ },
251
+ {
252
+ "type": "aside_text",
253
+ "text": "arXiv:2504.05792v1 [cs.IT] 8 Apr 2025",
254
+ "bbox": [
255
+ 22,
256
+ 255,
257
+ 58,
258
+ 676
259
+ ],
260
+ "page_idx": 0
261
+ },
262
+ {
263
+ "type": "text",
264
+ "text": "sity function (pdf) of $\\hat{d}_{mn}$ conditioned on $d_{mn}$ , $1\\leq n\\leq N$ , is given by",
265
+ "bbox": [
266
+ 73,
267
+ 66,
268
+ 491,
269
+ 99
270
+ ],
271
+ "page_idx": 1
272
+ },
273
+ {
+ "type": "equation",
+ "text": "\n$$\nf(\\hat{d}_{m1}, \\dots, \\hat{d}_{mN}) = \\prod_{n=1}^{N} \\frac{1}{\\sqrt{2\\pi\\sigma_{mn}^{2}}} e^{-\\frac{(\\hat{d}_{mn} - d_{mn})^{2}}{2\\sigma_{mn}^{2}}}, \\tag{3}\n$$\n",
+ "text_format": "latex",
+ "bbox": [102, 104, 491, 147],
+ "page_idx": 1
+ },
+ {
+ "type": "text",
+ "text": "whose log-likelihood function is given by",
+ "bbox": [75, 151, 359, 167],
+ "page_idx": 1
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\begin{aligned} L \\triangleq \\ln f(\\hat{d}_{m1}, \\dots, \\hat{d}_{mN}) = &- \\frac{N}{2}\\ln(2\\pi) \\\\ &- \\sum_{n=1}^{N} \\ln\\sigma_{mn} - \\sum_{n=1}^{N} \\frac{(\\hat{d}_{mn} - d_{mn})^{2}}{2\\sigma_{mn}^{2}}. \\end{aligned} \\tag{4}\n$$\n",
+ "text_format": "latex",
+ "bbox": [142, 172, 488, 244],
+ "page_idx": 1
+ },
+ {
+ "type": "text",
+ "text": "Recall that the CRLB for $x_{m}$ and $y_{m}$ is given by",
+ "bbox": [75, 250, 413, 265],
+ "page_idx": 1
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\mathcal{E}\\left\\{\\left(\\hat{x}_{m} - x_{m}\\right)^{2} + \\left(\\hat{y}_{m} - y_{m}\\right)^{2}\\right\\} \\geq \\frac{1}{J_{x}^{m}} + \\frac{1}{J_{y}^{m}} \\triangleq \\mathrm{CRB}_{m}, \\tag{5}\n$$\n",
+ "text_format": "latex",
+ "bbox": [91, 268, 488, 299],
+ "page_idx": 1
+ },
+ {
+ "type": "text",
+ "text": "where $\\hat{x}_m$ and $\\hat{y}_m$ denote the estimates of $x_m$ and $y_m$, respectively, $J_x^m = \\mathcal{E}\\left\\{-\\frac{\\partial^2L}{\\partial x_m^2}\\right\\}$ and $J_y^m = \\mathcal{E}\\left\\{-\\frac{\\partial^2L}{\\partial y_m^2}\\right\\}$.",
+ "bbox": [75, 304, 488, 340],
+ "page_idx": 1
+ },
342
+ {
+ "type": "text",
+ "text": "$\\frac{\\partial L}{\\partial x_m}$ can be obtained as follows:",
+ "bbox": [91, 339, 315, 358],
+ "page_idx": 1
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\begin{aligned} \\frac{\\partial L}{\\partial x_{m}} = &- \\sum_{n=1}^{N} \\frac{1}{\\sigma_{mn}} \\frac{\\partial \\sigma_{mn}}{\\partial x_{m}} - \\sum_{n=1}^{N} \\frac{\\left(d_{mn} - \\hat{d}_{mn}\\right)}{\\sigma_{mn}^{2}} \\frac{\\partial d_{mn}}{\\partial x_{m}} \\\\ &+ \\sum_{n=1}^{N} \\frac{(\\hat{d}_{mn} - d_{mn})^{2}}{\\sigma_{mn}^{3}} \\frac{\\partial \\sigma_{mn}}{\\partial x_{m}}. \\end{aligned} \\tag{6}\n$$\n",
+ "text_format": "latex",
+ "bbox": [91, 364, 488, 448],
+ "page_idx": 1
+ },
+ {
+ "type": "text",
+ "text": "The expression of $\\frac{\\partial^2L}{\\partial x_m^2}$ is quite involved; however, by using the fact that $\\mathcal{E}\\{\\hat{d}_{mn} - d_{mn}\\} = 0$ and following steps similar to those in [17], the expectation of $-\\frac{\\partial^2L}{\\partial x_m^2}$, i.e., $J_x^m$, can be obtained as follows:",
+ "bbox": [73, 455, 491, 521],
+ "page_idx": 1
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\nJ_{x}^{m} = \\sum_{n=1}^{N} \\frac{\\left(2K_{E} + 1\\right)}{\\sigma_{mn}^{2}} \\frac{\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)^{2}}{\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)^{2} + \\left(y_{m} - y_{n}^{\\mathrm{Pin}}\\right)^{2} + d_{\\mathrm{H}}^{2}}. \\tag{7}\n$$\n",
+ "text_format": "latex",
+ "bbox": [81, 526, 488, 580],
+ "page_idx": 1
+ },
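[Editor's note: since (7) compresses several derivation steps, a small numerical cross-check may help. The sketch below estimates J_x^m as the Monte-Carlo average of the negated second difference of the log-likelihood (4) and compares it with the closed form (7); all geometry values are illustrative assumptions.]

    import numpy as np

    rng = np.random.default_rng(1)
    K_E, d_H = 0.01, 3.0
    user = np.array([2.0, 1.0])                          # true (x_m, y_m), assumed
    pins = np.array([[-4.0, -2.0], [4.0, -2.0], [-4.0, 2.0], [4.0, 2.0]])

    def loglik(x, y, d_hat):
        # log-likelihood (4), dropping its constant term
        d = np.sqrt((x - pins[:, 0]) ** 2 + (y - pins[:, 1]) ** 2 + d_H ** 2)
        s2 = K_E * d ** 2
        return (-0.5 * np.log(s2) - (d_hat - d) ** 2 / (2 * s2)).sum()

    d_true = np.sqrt(((pins - user) ** 2).sum(axis=1) + d_H ** 2)
    eps, trials, acc = 1e-4, 20000, 0.0
    for _ in range(trials):
        d_hat = d_true + rng.normal(0.0, np.sqrt(K_E) * d_true)   # per (1)-(2)
        acc -= (loglik(user[0] + eps, user[1], d_hat)
                - 2 * loglik(user[0], user[1], d_hat)
                + loglik(user[0] - eps, user[1], d_hat)) / eps ** 2

    dx2 = (user[0] - pins[:, 0]) ** 2
    d2 = dx2 + (user[1] - pins[:, 1]) ** 2 + d_H ** 2
    J_x = ((2 * K_E + 1) / (K_E * d2) * dx2 / d2).sum()   # closed form (7)
    print(acc / trials, J_x)   # the two values should agree up to MC noise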
388
+ {
+ "type": "text",
+ "text": "$J_{y}^{m}$ can be obtained in a similar form, which means that the CRLB for estimating $\\mathrm{U}_m$'s location can be expressed as follows:",
+ "bbox": [73, 588, 491, 633],
+ "page_idx": 1
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\begin{aligned} \\mathrm{CRB}_{m} = \\frac{K_{E}}{(2K_{E} + 1)} &\\left(\\frac{1}{\\sum_{n=1}^{N} \\frac{\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)^{2}}{\\left(\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)^{2} + \\left(y_{m} - y_{n}^{\\mathrm{Pin}}\\right)^{2} + d_{\\mathrm{H}}^{2}\\right)^{2}}}\\right. \\\\ &\\left. + \\frac{1}{\\sum_{n=1}^{N} \\frac{\\left(y_{m} - y_{n}^{\\mathrm{Pin}}\\right)^{2}}{\\left(\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)^{2} + \\left(y_{m} - y_{n}^{\\mathrm{Pin}}\\right)^{2} + d_{\\mathrm{H}}^{2}\\right)^{2}}}\\right). \\end{aligned} \\tag{8}\n$$\n",
+ "text_format": "latex",
+ "bbox": [76, 637, 488, 752],
+ "page_idx": 1
+ },
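[Editor's note: for readers who want to evaluate (8) directly, a small helper follows; it is a sketch under assumed geometry, with defaults mirroring K_E = 0.01 and d_H = 3 m from Section IV. Later snippets in this file reuse this crb_pin function.]

    import numpy as np

    def crb_pin(user, pins, d_H=3.0, K_E=0.01):
        """CRB_m of (8): user = (x_m, y_m), pins = array of (x_n^Pin, y_n^Pin)."""
        dx = user[0] - pins[:, 0]
        dy = user[1] - pins[:, 1]
        d4 = (dx ** 2 + dy ** 2 + d_H ** 2) ** 2
        Jx = (dx ** 2 / d4).sum()      # Fisher terms up to the common constant
        Jy = (dy ** 2 / d4).sum()
        return K_E / (2 * K_E + 1) * (1.0 / Jx + 1.0 / Jy)

    # usage: eight assumed antennas on two waveguides, user at the area edge
    pins = np.array([[x, y] for x in (-15.0, -5.0, 5.0, 15.0) for y in (-1.0, 1.0)])
    print(crb_pin(np.array([20.0, 0.0]), pins))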
411
+ {
+ "type": "text",
+ "text": "B. Performance Analysis Based on CRLB",
+ "text_level": 1,
+ "bbox": [75, 770, 359, 785],
+ "page_idx": 1
+ },
+ {
+ "type": "text",
+ "text": "1) Performance Gain over Conventional Antennas: For the conventional-antenna benchmark, consider the use of a circular antenna array with its center located at $(0,0,0)$ and its radius being $\\frac{\\lambda}{4\\sin\\left(\\frac{\\pi}{N}\\right)}$, which ensures that the minimal pairwise distance of the antennas is $\\frac{\\lambda}{2}$, where $\\lambda$ denotes the wavelength. By using the fact that the users are uniformly distributed within the service area, the performance gain of pinching antennas over conventional antennas can be evaluated as follows:",
+ "bbox": [73, 787, 491, 900],
+ "page_idx": 1
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\Delta_{\\mathrm{CRB}} = \\int_{-\\frac{D_{\\mathrm{L}}}{2}}^{\\frac{D_{\\mathrm{L}}}{2}} \\int_{-\\frac{D_{\\mathrm{W}}}{2}}^{\\frac{D_{\\mathrm{W}}}{2}} \\left(\\mathrm{CRB}_{m} - \\mathrm{CRB}_{m}^{\\mathrm{Conv}}\\right) \\frac{dy_{m}}{D_{\\mathrm{W}}} \\frac{dx_{m}}{D_{\\mathrm{L}}}, \\tag{9}\n$$\n",
+ "text_format": "latex",
+ "bbox": [91, 902, 488, 943],
+ "page_idx": 1
+ },
+ {
+ "type": "text",
+ "text": "where $\\mathrm{CRB}_m^{\\mathrm{Conv}}$ can be obtained similarly to $\\mathrm{CRB}_m$ by replacing the locations of the pinching antennas with those of the conventional antennas. The performance gain in (9) can be straightforwardly evaluated via computer simulations, but a closed-form expression of $\\Delta_{\\mathrm{CRB}}$ is difficult to obtain due to the fractional expression of the CRLB. We note that the performance gain of pinching antennas over conventional antennas can also be illustrated by simply focusing on the user located at $\\left(\\frac{D_{\\mathrm{L}}}{2},0,0\\right)$. The use of conventional antennas can achieve the following CRLB:",
+ "bbox": [501, 66, 921, 220],
+ "page_idx": 1
+ },
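[Editor's note: a Monte-Carlo sketch of the average gain (9) follows, reusing crb_pin from the snippet after (8). The wavelength and the waveguide y-offsets are assumptions; D_W, D_L, d_H, and K_E follow the Section IV defaults.]

    import numpy as np  # crb_pin from the snippet after (8) is assumed in scope

    rng = np.random.default_rng(2)
    D_W, D_L, d_H, K_E, N = 10.0, 40.0, 3.0, 0.01, 8
    lam = 0.01                                      # wavelength, assumed (~30 GHz)
    r = lam / (4 * np.sin(np.pi / N))               # circular-array radius (III-B1)
    ang = 2 * np.pi * np.arange(N) / N
    conv = np.c_[r * np.cos(ang), r * np.sin(ang)]  # conventional antennas
    pins = np.c_[np.tile(np.linspace(-D_L / 2, D_L / 2, N // 2), 2),
                 np.repeat([-D_W / 4, D_W / 4], N // 2)]  # two waveguides, assumed y

    users = rng.uniform([-D_L / 2, -D_W / 2], [D_L / 2, D_W / 2], size=(20000, 2))
    gain = np.mean([crb_pin(u, pins, d_H, K_E) - crb_pin(u, conv, d_H, K_E)
                    for u in users])
    print(gain)  # negative: pinching antennas give the smaller average CRLB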
457
+ {
+ "type": "equation",
+ "text": "\n$$\n\\begin{aligned} \\mathrm{CRB}_{m}^{\\mathrm{Conv}} = &\\left(\\frac{1}{\\sum_{n=1}^{N} \\frac{\\left(\\frac{D_{\\mathrm{L}}}{2} - x_{n}^{\\mathrm{Conv}}\\right)^{2}}{\\left(\\left(\\frac{D_{\\mathrm{L}}}{2} - x_{n}^{\\mathrm{Conv}}\\right)^{2} + (y_{n}^{\\mathrm{Conv}})^{2} + d_{\\mathrm{H}}^{2}\\right)^{2}}}\\right. \\\\ &\\left. + \\frac{1}{\\sum_{n=1}^{N} \\frac{\\left(y_{n}^{\\mathrm{Conv}}\\right)^{2}}{\\left(\\left(\\frac{D_{\\mathrm{L}}}{2} - x_{n}^{\\mathrm{Conv}}\\right)^{2} + \\left(y_{n}^{\\mathrm{Conv}}\\right)^{2} + d_{\\mathrm{H}}^{2}\\right)^{2}}}\\right) \\frac{K_{E}}{\\left(2K_{E} + 1\\right)} \\\\ &\\stackrel{(a)}{\\approx} \\frac{K_{E}}{(2K_{E} + 1)} \\left(\\frac{4\\left(\\frac{D_{\\mathrm{L}}^{2}}{4} + d_{\\mathrm{H}}^{2}\\right)^{2}}{N D_{\\mathrm{L}}^{2}} + \\frac{\\left(\\frac{D_{\\mathrm{L}}^{2}}{4} + d_{\\mathrm{H}}^{2}\\right)^{2}}{\\sum_{n=1}^{N} (y_{n}^{\\mathrm{Conv}})^{2}}\\right) \\\\ &\\xrightarrow{(b)} \\infty, \\end{aligned} \\tag{10}\n$$\n",
+ "text_format": "latex",
+ "bbox": [526, 227, 919, 468],
+ "page_idx": 1
+ },
+ {
+ "type": "text",
+ "text": "where step (a) is due to the fact that the conventional antennas are clustered close to the center of the service area, and step (b) is due to the fact that $|y_{n}^{\\mathrm{Conv}}| \\to 0$ for conventional antennas, particularly for the case with high carrier frequencies (i.e., small wavelengths).",
+ "bbox": [501, 478, 919, 553],
+ "page_idx": 1
+ },
+ {
+ "type": "text",
+ "text": "On the other hand, pinching antennas do not suffer from the singularity issue experienced by conventional antennas. For example, for the user located at $\\left(\\frac{D_{\\mathrm{L}}}{2},0,0\\right)$, the corresponding CRLB can be expressed as follows:",
+ "bbox": [503, 554, 921, 614],
+ "page_idx": 1
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\begin{aligned} \\mathrm{CRB}_{m} = \\frac{K_{E}}{\\left(2K_{E} + 1\\right)} &\\left(\\frac{1}{\\sum_{n=1}^{N} \\frac{\\left(\\frac{D_{\\mathrm{L}}}{2} - x_{n}^{\\mathrm{Pin}}\\right)^{2}}{\\left(\\left(\\frac{D_{\\mathrm{L}}}{2} - x_{n}^{\\mathrm{Pin}}\\right)^{2} + (y_{n}^{\\mathrm{Pin}})^{2} + d_{\\mathrm{H}}^{2}\\right)^{2}}}\\right. \\\\ &\\left. + \\frac{1}{\\sum_{n=1}^{N} \\frac{\\left(y_{n}^{\\mathrm{Pin}}\\right)^{2}}{\\left(\\left(\\frac{D_{\\mathrm{L}}}{2} - x_{n}^{\\mathrm{Pin}}\\right)^{2} + \\left(y_{n}^{\\mathrm{Pin}}\\right)^{2} + d_{\\mathrm{H}}^{2}\\right)^{2}}}\\right). \\end{aligned} \\tag{11}\n$$\n",
+ "text_format": "latex",
+ "bbox": [511, 623, 919, 782],
+ "page_idx": 1
+ },
+ {
+ "type": "text",
+ "text": "For illustrative purposes, a simple upper bound on the CRLB achieved by pinching antennas can be obtained as follows:",
+ "bbox": [503, 789, 919, 821],
+ "page_idx": 1
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\begin{aligned} \\mathrm{CRB}_{m} \\leq \\frac{K_{E}}{\\left(2K_{E} + 1\\right)} &\\left(\\frac{\\left(\\left(\\frac{D_{\\mathrm{L}}}{2} - x_{n}^{\\mathrm{Pin}}\\right)^{2} + \\left(y_{n}^{\\mathrm{Pin}}\\right)^{2} + d_{\\mathrm{H}}^{2}\\right)^{2}}{\\left(\\frac{D_{\\mathrm{L}}}{2} - x_{n}^{\\mathrm{Pin}}\\right)^{2}}\\right. \\\\ &\\left. + \\frac{\\left(\\left(\\frac{D_{\\mathrm{L}}}{2} - x_{n}^{\\mathrm{Pin}}\\right)^{2} + \\left(y_{n}^{\\mathrm{Pin}}\\right)^{2} + d_{\\mathrm{H}}^{2}\\right)^{2}}{\\left(y_{n}^{\\mathrm{Pin}}\\right)^{2}}\\right), \\end{aligned} \\tag{12}\n$$\n",
+ "text_format": "latex",
+ "bbox": [511, 828, 919, 941],
+ "page_idx": 1
+ },
+ {
+ "type": "page_number",
+ "text": "2",
+ "bbox": [911, 30, 919, 40],
+ "page_idx": 1
+ },
+ {
+ "type": "text",
+ "text": "where $n$ is an arbitrary integer between 1 and $N$. Because of the diverse locations of the $N$ pinching antennas, it is always possible to find $n \\in \\{1, \\dots, N\\}$ which yields a finite value for the upper bound shown in (12), i.e., the CRLB achieved by pinching antennas is always bounded.",
+ "bbox": [73, 68, 491, 145],
+ "page_idx": 2
+ },
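[Editor's note: a quick numeric check of the single-antenna bound (12) follows, reusing crb_pin from the snippet after (8). For the edge user at (D_L/2, 0, 0), the per-antenna bound should dominate the exact CRLB of (11) and stay finite; the antenna layout is an illustrative assumption.]

    import numpy as np  # reuses crb_pin from the snippet after (8)

    D_L, d_H, K_E = 40.0, 3.0, 0.01
    user = np.array([D_L / 2, 0.0])
    pins = np.c_[np.tile(np.linspace(-18.0, 18.0, 10), 2),
                 np.repeat([-2.5, 2.5], 10)]

    exact = crb_pin(user, pins, d_H, K_E)                  # eq. (11)
    dx2 = (user[0] - pins[:, 0]) ** 2
    dy2 = (user[1] - pins[:, 1]) ** 2
    d4 = (dx2 + dy2 + d_H ** 2) ** 2
    bound = K_E / (2 * K_E + 1) * (d4 / dx2 + d4 / dy2)    # eq. (12), per antenna n
    assert np.all(bound >= exact) and np.isfinite(bound).all()
    print(exact, bound.min())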
548
+ {
+ "type": "text",
+ "text": "Remark 1: Unlike conventional antennas, which can cause noticeable accuracy variations between users, the case study carried out above shows that pinching antennas have the ability to offer uniform positioning accuracy across users.",
+ "bbox": [73, 145, 491, 205],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "2) Flexible User-Centric Positioning: Due to their low-cost and reconfigurability features, the locations of pinching antennas can be tailored to a serving user for realizing flexible user-centric positioning. To facilitate the performance analysis, the association between the pinching antennas and the waveguides is required. Without loss of generality, assume that there are $\\tilde{N} = \\frac{N}{N_{\\mathrm{WG}}}$ pinching antennas on each waveguide. Denote the location of the $n$-th antenna on the $i$-th waveguide by $\\psi_{in}^{\\mathrm{Pin}} = (x_{in}^{\\mathrm{Pin}},y_{in}^{\\mathrm{Pin}},d_{\\mathrm{H}})$. Furthermore, assume that the antennas are equally spaced, and define $\\Delta_x = |x_{in}^{\\mathrm{Pin}} - x_{im}^{\\mathrm{Pin}}|$ and $\\Delta_y = |y_{in}^{\\mathrm{Pin}} - y_{jn}^{\\mathrm{Pin}}|$, $m\\neq n$ and $i\\neq j$.",
+ "bbox": [73, 205, 491, 372],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "For illustrative purposes, assume that all $N$ pinching antennas are activated in a square area with $\\mathrm{U}_m$ at its center, where $\\tilde{N} = N_{\\mathrm{WG}}$ and $\\Delta_x = \\Delta_y = \\Delta$. This assumption is made to facilitate the performance analysis, and more practical setups will be considered in the simulation section. Define $\\bar{N} = \\frac{\\tilde{N}}{2}$, and without loss of generality, assume that $\\tilde{N}$ is an even number.",
+ "bbox": [73, 371, 491, 476],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "With these assumptions, the CRLB in (8) can be simplified as follows:",
+ "bbox": [73, 478, 491, 508],
+ "page_idx": 2
+ },
592
+ {
+ "type": "equation",
+ "text": "\n$$\n\\mathrm{CRB}_{m} = \\frac{\\frac{K_{E}\\Delta^{2}}{4(2K_{E} + 1)}}{\\sum_{i=1}^{\\bar{N}} \\sum_{n=1}^{\\bar{N}} \\frac{(n - \\frac{1}{2})^{2}}{\\beta_{ni}^{2}}} + \\frac{\\frac{K_{E}\\Delta^{2}}{4(2K_{E} + 1)}}{\\sum_{i=1}^{\\bar{N}} \\sum_{n=1}^{\\bar{N}} \\frac{(i - \\frac{1}{2})^{2}}{\\beta_{ni}^{2}}},\n$$\n",
+ "text_format": "latex",
+ "bbox": [94, 515, 472, 566],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "where $\\beta_{ni} = \\left(n - \\frac{1}{2}\\right)^{2} + \\left(i - \\frac{1}{2}\\right)^{2} + \\frac{d_{\\mathrm{H}}^{2}}{\\Delta^{2}}$. The above CRLB can be used to design the antenna placement, i.e., the optimal choice of $\\Delta$ for minimizing the CRLB. Computer simulations can be used to verify that $\\frac{\\partial^2\\mathrm{CRB}_m}{\\partial\\Delta^2} > 0$, i.e., $\\mathrm{CRB}_m$ is a convex function of $\\Delta$, and hence convex optimization solvers can be used to find the optimal solution of $\\Delta$ efficiently. To obtain an insightful understanding of the optimal choice of $\\Delta$, a special case with $N = 4$ is focused on in the following. We note that this special case is important in practice, given the fact that using a small number of antennas is helpful in reducing system overhead. For the case with $N = 4$, the CRLB can be simplified as follows:",
+ "bbox": [73, 577, 491, 763],
+ "page_idx": 2
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\mathrm{CRB}_{m} = \\frac{2K_{E}\\Delta^{2}}{\\left(2K_{E} + 1\\right)} \\left(\\frac{1}{2} + \\frac{d_{\\mathrm{H}}^{2}}{\\Delta^{2}}\\right)^{2}, \\tag{13}\n$$\n",
+ "text_format": "latex",
+ "bbox": [156, 772, 491, 809],
+ "page_idx": 2
+ },
627
+ {
+ "type": "text",
+ "text": "whose first-order derivative is given by",
+ "bbox": [73, 816, 344, 832],
+ "page_idx": 2
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\frac{\\partial\\mathrm{CRB}_{m}}{\\partial\\Delta} = \\frac{4K_{E}}{(2K_{E} + 1)} \\left(\\frac{1}{2}\\Delta + \\frac{d_{\\mathrm{H}}^{2}}{\\Delta}\\right) \\left(\\frac{1}{2} - \\frac{d_{\\mathrm{H}}^{2}}{\\Delta^{2}}\\right). \\tag{14}\n$$\n",
+ "text_format": "latex",
+ "bbox": [94, 840, 491, 876],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "The second-order derivative of $\\mathrm{CRB}_m$ is given by",
+ "bbox": [73, 883, 419, 898],
+ "page_idx": 2
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\frac{\\partial^{2}\\mathrm{CRB}_{m}}{\\partial\\Delta^{2}} = \\frac{4K_{E}}{\\left(2K_{E} + 1\\right)} \\left(\\frac{1}{4} + 3\\frac{d_{\\mathrm{H}}^{4}}{\\Delta^{4}}\\right) > 0, \\tag{15}\n$$\n",
+ "text_format": "latex",
+ "bbox": [135, 907, 491, 941],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "which means that $\\mathrm{CRB}_m$ is a convex function of $\\Delta$. Therefore, the optimal solution of $\\Delta$ for minimizing the CRLB for the special case with $N = 4$ is given by",
+ "bbox": [503, 68, 921, 114],
+ "page_idx": 2
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\Delta^{*} = \\sqrt{2} d_{\\mathrm{H}}. \\tag{16}\n$$\n",
+ "text_format": "latex",
+ "bbox": [665, 125, 921, 142],
+ "page_idx": 2
+ },
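[Editor's note: a self-contained sanity check of (16) follows: it minimizes the N = 4 CRLB expression (13) over the spacing Delta by grid search and compares the minimizer with sqrt(2)*d_H. K_E and d_H follow the Section IV defaults; the grid is an assumption.]

    import numpy as np

    d_H, K_E = 3.0, 0.01
    delta = np.linspace(0.1, 20.0, 200000)
    crb = 2 * K_E / (2 * K_E + 1) * delta ** 2 * (0.5 + d_H ** 2 / delta ** 2) ** 2
    print(delta[crb.argmin()], np.sqrt(2) * d_H)   # both should be ~4.2426 m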
696
+ {
+ "type": "text",
+ "text": "Remark 2: An intuitive conjecture is that the CRLB is minimized if all the antennas are placed as close to the user as possible, i.e., $\\Delta^{*} \\to 0$ (or $\\frac{\\lambda}{2}$ to avoid antenna coupling). (16) shows that this intuition is wrong, as the optimal antenna spacing is a function of the height of the waveguides.",
+ "bbox": [503, 152, 921, 228],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "3) Local-Maximum Property of CRLB: In the proximity of each pinching antenna, $\\psi_{n}^{\\mathrm{Pin}}$, there exists a local maximum of $\\mathrm{CRB}_m$ shown in (8). This local-maximum property can be revealed by studying $\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m}$ and $\\frac{\\partial\\mathrm{CRB}_m}{\\partial y_m}$. Without loss of generality, $\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m}$ is focused on, and can be expressed as follows:",
+ "bbox": [503, 228, 921, 309],
+ "page_idx": 2
+ },
718
+ {
+ "type": "equation",
+ "text": "\n$$\n\\frac{\\partial\\mathrm{CRB}_{m}}{\\partial x_{m}} = \\frac{K_{E}}{(2K_{E} + 1)} \\left(-\\frac{1}{\\gamma_{1}^{2}}\\left[\\gamma_{2} - \\gamma_{3}\\right] + \\frac{1}{\\gamma_{4}^{2}}\\gamma_{5}\\right), \\tag{17}\n$$\n",
+ "text_format": "latex",
+ "bbox": [524, 316, 921, 352],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "where $d_{mn}^2 = \\left(x_m - x_n^{\\mathrm{Pin}}\\right)^2 + \\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2 + d_{\\mathrm{H}}^2$, $\\gamma_{1} = \\sum_{n=1}^{N}\\frac{\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)^{2}}{d_{mn}^{4}}$, $\\gamma_{2} = \\sum_{n=1}^{N}\\frac{2\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)}{d_{mn}^{4}}$, $\\gamma_{3} = \\sum_{n=1}^{N}\\frac{4\\left(x_m - x_n^{\\mathrm{Pin}}\\right)^3}{d_{mn}^6}$, $\\gamma_4 = \\sum_{n=1}^{N}\\frac{\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2}{d_{mn}^4}$, and $\\gamma_5 = \\sum_{n=1}^{N}\\frac{4\\left(x_m - x_n^{\\mathrm{Pin}}\\right)\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2}{d_{mn}^6}$.",
+ "bbox": [503, 361, 921, 452],
+ "page_idx": 2
+ },
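[Editor's note: the gradient (17) is easy to get wrong, so a finite-difference cross-check follows, reusing crb_pin from the snippet after (8). The gamma terms are assembled exactly as defined above; the test point and layout are assumptions.]

    import numpy as np  # reuses crb_pin from the snippet after (8)

    def dcrb_dx(user, pins, d_H=3.0, K_E=0.01):
        # eq. (17), built from the gamma terms defined after it
        dx = user[0] - pins[:, 0]
        dy = user[1] - pins[:, 1]
        d2 = dx ** 2 + dy ** 2 + d_H ** 2
        g1 = (dx ** 2 / d2 ** 2).sum()
        g2 = (2 * dx / d2 ** 2).sum()
        g3 = (4 * dx ** 3 / d2 ** 3).sum()
        g4 = (dy ** 2 / d2 ** 2).sum()
        g5 = (4 * dx * dy ** 2 / d2 ** 3).sum()
        return K_E / (2 * K_E + 1) * (-(g2 - g3) / g1 ** 2 + g5 / g4 ** 2)

    user = np.array([3.0, 1.5])          # assumed test point
    pins = np.array([[0.0, -2.0], [6.0, -2.0], [0.0, 2.0], [6.0, 2.0]])
    eps = 1e-6
    num = (crb_pin(user + [eps, 0.0], pins)
           - crb_pin(user - [eps, 0.0], pins)) / (2 * eps)
    print(dcrb_dx(user, pins), num)      # the two values should agree closely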
741
+ {
+ "type": "text",
+ "text": "Without loss of generality, assume that $\\mathrm{U}_m$ is in the proximity of the first pinching antenna on the first waveguide, i.e., $x_{m} = x_{11}^{\\mathrm{Pin}} + \\delta_{x}$ and $y_{m} = y_{11}^{\\mathrm{Pin}} + \\delta_{y}$, where $\\delta_x\\to 0$ and $\\delta_y\\rightarrow 0$. In this case, $\\gamma_{1}$ in (17) can be approximated as follows:",
+ "bbox": [503, 452, 921, 525],
+ "page_idx": 2
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\gamma_{1} \\approx \\sum_{i=1}^{\\frac{N}{\\tilde{N}}} \\sum_{n=1}^{\\tilde{N} - 1} \\frac{n^{2}\\Delta_{x}^{2}}{\\left(n^{2}\\Delta_{x}^{2} + (i - 1)^{2}\\Delta_{y}^{2} + d_{\\mathrm{H}}^{2}\\right)^{2}}, \\tag{18}\n$$\n",
+ "text_format": "latex",
+ "bbox": [560, 534, 921, 580],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "where the terms at the order of $\\delta_x^2$ are omitted. Similarly, by omitting the terms of $\\delta_x^2$, $\\gamma_2$ can be approximated as follows:",
+ "bbox": [503, 590, 919, 623],
+ "page_idx": 2
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\begin{aligned} \\gamma_{2} \\approx &\\sum_{i=1}^{\\frac{N}{\\tilde{N}}} \\frac{2\\delta_{x}}{\\left(\\delta_{x}^{2} + (i - 1)^{2}\\Delta_{y}^{2} + d_{\\mathrm{H}}^{2}\\right)^{2}} \\\\ &- \\sum_{i=1}^{\\frac{N}{\\tilde{N}}} \\sum_{n=1}^{\\tilde{N} - 1} \\frac{2n\\Delta_{x}}{\\left(n^{2}\\Delta_{x}^{2} + (i - 1)^{2}\\Delta_{y}^{2} + d_{\\mathrm{H}}^{2}\\right)^{2}}. \\end{aligned} \\tag{19}\n$$\n",
+ "text_format": "latex",
+ "bbox": [552, 632, 919, 728],
+ "page_idx": 2
+ },
787
+ {
+ "type": "text",
+ "text": "Similarly, $\\gamma_3$, $\\gamma_4$ and $\\gamma_5$ can be approximated as follows:",
+ "bbox": [504, 736, 893, 752],
+ "page_idx": 2
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\gamma_{3} \\approx -\\sum_{i=1}^{\\frac{N}{\\tilde{N}}} \\sum_{n=1}^{\\tilde{N} - 1} \\frac{4n^{3}\\Delta_{x}^{3}}{\\left(n^{2}\\Delta_{x}^{2} + (i - 1)^{2}\\Delta_{y}^{2} + d_{\\mathrm{H}}^{2}\\right)^{3}}, \\tag{20}\n$$\n",
+ "text_format": "latex",
+ "bbox": [539, 761, 919, 808],
+ "page_idx": 2
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\gamma_{4} \\approx \\sum_{i=1}^{\\frac{N}{\\tilde{N}} - 1} \\sum_{n=1}^{\\tilde{N}} \\frac{i^{2}\\Delta_{y}^{2}}{\\left((n - 1)^{2}\\Delta_{x}^{2} + i^{2}\\Delta_{y}^{2} + d_{\\mathrm{H}}^{2}\\right)^{2}}, \\tag{21}\n$$\n",
+ "text_format": "latex",
+ "bbox": [539, 810, 919, 857],
+ "page_idx": 2
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\gamma_{5} \\approx -\\sum_{i=1}^{\\frac{N}{\\tilde{N}} - 1} \\sum_{n=1}^{\\tilde{N} - 1} \\frac{4n\\Delta_{x} i^{2}\\Delta_{y}^{2}}{\\left(n^{2}\\Delta_{x}^{2} + i^{2}\\Delta_{y}^{2} + d_{\\mathrm{H}}^{2}\\right)^{3}}. \\tag{22}\n$$\n",
+ "text_format": "latex",
+ "bbox": [539, 859, 919, 905],
+ "page_idx": 2
+ },
834
+ {
+ "type": "text",
+ "text": "To facilitate the analysis of this local-maximum property of the CRLB, assume that $\\Delta_x = \\Delta_y = \\Delta \\gg d_{\\mathrm{H}}$ and $\\tilde{N} = \\frac{N}{\\tilde{N}}$, which",
+ "bbox": [503, 914, 921, 948],
+ "page_idx": 2
+ },
+ {
+ "type": "page_number",
+ "text": "3",
+ "bbox": [911, 30, 919, 40],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "means that $\\gamma_{4} = \\gamma_{1}$, and hence $\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m}$ can be simplified as follows:",
+ "bbox": [73, 69, 491, 98],
+ "page_idx": 3
+ },
867
+ {
+ "type": "equation",
+ "text": "\n$$\n\\begin{aligned} \\frac{\\partial\\mathrm{CRB}_{m}}{\\partial x_{m}} \\approx \\frac{K_{E}}{(2K_{E} + 1)\\gamma_{1}^{2}} &\\left[-\\sum_{i=1}^{\\frac{N}{\\tilde{N}}} \\frac{2\\delta_{x}}{(\\delta_{x}^{2} + (i - 1)^{2}\\Delta^{2} + d_{\\mathrm{H}}^{2})^{2}}\\right. \\\\ &\\left. + \\sum_{i=1}^{\\frac{N}{\\tilde{N}}} \\sum_{n=1}^{\\tilde{N} - 1} \\frac{2n\\Delta}{\\bar{\\beta}_{ni}^{2}} - \\sum_{i=1}^{\\frac{N}{\\tilde{N}}} \\sum_{n=1}^{\\tilde{N} - 1} \\frac{4n^{3}\\Delta^{3}}{\\bar{\\beta}_{ni}^{3}} - \\sum_{i=1}^{\\frac{N}{\\tilde{N}} - 1} \\sum_{n=1}^{\\tilde{N} - 1} \\frac{4n^{3}\\Delta^{3}}{\\bar{\\beta}_{ni}^{3}}\\right], \\end{aligned}\n$$\n",
+ "text_format": "latex",
+ "bbox": [84, 101, 480, 202],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "where $\\bar{\\beta}_{ni} = (n^2 + (i - 1)^2)\\Delta^2 + d_{\\mathrm{H}}^2$.",
+ "bbox": [73, 204, 338, 222],
+ "page_idx": 3
+ },
890
+ {
+ "type": "text",
+ "text": "Note that for $i = \\frac{N}{\\tilde{N}}$, $\\sum_{n=1}^{\\tilde{N}-1} \\frac{4n^3\\Delta^3}{\\left((n^2+(i-1)^2)\\Delta^2+d_{\\mathrm{H}}^2\\right)^3}$ is an insignificant term, which means that the derivative can be further simplified as follows:",
+ "bbox": [73, 220, 491, 273],
+ "page_idx": 3
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\begin{aligned} \\frac{\\partial\\mathrm{CRB}_{m}}{\\partial x_{m}} \\approx \\frac{K_{E}}{(2K_{E} + 1)\\gamma_{1}^{2}} &\\left[-\\sum_{i=1}^{\\frac{N}{\\tilde{N}}} \\frac{2\\delta_{x}}{(\\delta_{x}^{2} + (i - 1)^{2}\\Delta^{2} + d_{\\mathrm{H}}^{2})^{2}}\\right. \\\\ &\\left. + 2\\Delta \\sum_{i=1}^{\\frac{N}{\\tilde{N}}} \\sum_{n=1}^{\\tilde{N} - 1} \\frac{n\\left(\\left((i - 1)^{2} - 3n^{2}\\right)\\Delta^{2} + d_{\\mathrm{H}}^{2}\\right)}{\\left(\\left(n^{2} + (i - 1)^{2}\\right)\\Delta^{2} + d_{\\mathrm{H}}^{2}\\right)^{3}}\\right]. \\end{aligned} \\tag{23}\n$$\n",
+ "text_format": "latex",
+ "bbox": [84, 277, 490, 380],
+ "page_idx": 3
+ },
913
+ {
+ "type": "text",
+ "text": "For the case with $\\delta_x = 0$, i.e., the user is located right underneath the pinching antenna at $\\psi_{11}^{\\mathrm{Pin}}$, by using the assumption that $\\Delta \\gg d_{\\mathrm{H}}$, the derivative of the CRLB can be expressed as follows:",
+ "bbox": [73, 382, 491, 441],
+ "page_idx": 3
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\frac{\\partial\\mathrm{CRB}_{m}}{\\partial x_{m}} \\approx \\frac{K_{E}}{(2K_{E} + 1)\\gamma_{1}^{2}} \\frac{2}{\\Delta^{3}}\\gamma_{6}, \\tag{24}\n$$\n",
+ "text_format": "latex",
+ "bbox": [179, 441, 488, 476],
+ "page_idx": 3
+ },
936
+ {
+ "type": "text",
+ "text": "where $\\gamma_6 = \\sum_{i=1}^{\\frac{N}{\\tilde{N}}} \\sum_{n=1}^{\\tilde{N}-1} \\frac{n\\left((i-1)^2 - 3n^2\\right)}{(n^2 + (i-1)^2)^3}$. We note that the terms of $\\gamma_6$ decay rapidly as $n$ and $i$ increase, i.e., $\\gamma_6$ can be approximated by keeping the dominant negative term ($n = 1$ and $i = 1$) and the dominant positive term ($n = 1$ and $i = 3$), i.e., $\\gamma_6 \\approx -3 + \\frac{1}{125}$, which means $\\frac{\\partial \\mathrm{CRB}_m}{\\partial x_m} \\leq 0$ for the case with $\\delta_x = 0$. For the case of $\\delta_x \\neq 0$, the derivative can be approximated as follows:",
+ "bbox": [73, 479, 491, 592],
+ "page_idx": 3
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\frac{\\partial\\mathrm{CRB}_{m}}{\\partial x_{m}} \\approx \\frac{K_{E}}{(2K_{E} + 1)\\gamma_{1}^{2}} \\left[-\\frac{2\\delta_{x}}{d_{\\mathrm{H}}^{4}} + \\frac{2}{\\Delta^{3}}\\gamma_{6}\\right].\n$$\n",
+ "text_format": "latex",
+ "bbox": [138, 595, 426, 630],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "Due to the assumption of $\\Delta \\gg d_{\\mathrm{H}}$, the term $\\frac{2\\delta_x}{d_{\\mathrm{H}}^4}$ is dominant, and hence $\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} > 0$ if $\\delta_{x} < 0$. In summary, $\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} < 0$ if the user is located at $(x_{11}^{\\mathrm{Pin}},y_{11}^{\\mathrm{Pin}},0)$, and $\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} > 0$ if the user is located at $(x_{11}^{\\mathrm{Pin}} + \\delta_x,y_{11}^{\\mathrm{Pin}} + \\delta_y,0)$ with $\\delta_x < 0$. A similar conclusion can be established for $\\frac{\\partial\\mathrm{CRB}_m}{\\partial y_m}$, which means that there exists a local maximum of the CRLB around $\\psi_{n}^{\\mathrm{Pin}}$.",
+ "bbox": [73, 633, 491, 733],
+ "page_idx": 3
+ },
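[Editor's note: the local-maximum property above can be eyeballed numerically. The scan below evaluates crb_pin (from the snippet after (8)) along a line through one pinching antenna; the CRLB should peak near the antenna's (x, y) position. The grid layout and sweep window are assumptions.]

    import numpy as np  # reuses crb_pin from the snippet after (8)

    pins = np.array([[x, y] for x in (-6.0, -2.0, 2.0, 6.0) for y in (-2.0, 2.0)])
    xs = np.linspace(-4.0, 0.0, 401)        # sweep through the antenna at x = -2
    crb = np.array([crb_pin(np.array([x, 2.0]), pins) for x in xs])
    print(xs[crb.argmax()])                 # expected: close to -2.0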
970
+ {
+ "type": "text",
+ "text": "Remark 3: The local-maximum property of the CRLB indicates an interesting conflict between the communication and sensing functionalities of pinching antennas. In particular, placing a pinching antenna directly above the user might increase the user's data rate but also degrade positioning accuracy. In other words, this local-maximum property reveals the importance of antenna placement in pinching-antenna assisted ISAC networks.",
+ "bbox": [73, 734, 491, 854],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "IV. NUMERICAL STUDIES",
+ "text_level": 1,
+ "bbox": [189, 864, 375, 878],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "In this section, computer simulation results are presented to demonstrate the impact of pinching antennas on the positioning accuracy, where $K_{E} = 0.01$, $D_{\\mathrm{W}} = 10 \\mathrm{~m}$ and $D_{\\mathrm{L}} = 40 \\mathrm{~m}$, unless stated otherwise.",
+ "bbox": [73, 883, 491, 944],
+ "page_idx": 3
+ },
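[Editor's note: a rough reconstruction of the Fig. 1 experiment under the stated defaults follows, reusing crb_pin from the snippet after (8); it averages the CRLB over uniformly dropped users for equally spaced antennas on two waveguides. The waveguide y-offsets of +/- D_W/4 are an assumption, as the letter does not state them.]

    import numpy as np  # reuses crb_pin from the snippet after (8)

    rng = np.random.default_rng(3)
    K_E, D_W, D_L, d_H = 0.01, 10.0, 40.0, 3.0
    for N in (4, 8, 16, 32):
        xs = np.linspace(-D_L / 2, D_L / 2, N // 2)
        pins = np.c_[np.tile(xs, 2), np.repeat([-D_W / 4, D_W / 4], N // 2)]
        users = rng.uniform([-D_L / 2, -D_W / 2], [D_L / 2, D_W / 2], (5000, 2))
        print(N, np.mean([crb_pin(u, pins, d_H, K_E) for u in users]))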
1004
+ {
+ "type": "image",
+ "img_path": "images/de2fd461e68d196df1294074a08a6e8f1ae03b5be4657fc9a3ee1fe1bcd21315.jpg",
+ "image_caption": [
+ "Fig. 1. Averaged CRLBs, $\\mathrm{CRB}_m$, achieved by the considered antenna systems, where $N_{\\mathrm{WG}} = 2$ and $d = 3\\mathrm{m}$. For the pinching-antenna system, on each waveguide, there are $\\frac{N}{N_{\\mathrm{WG}}}$ antennas, which are equally spaced. Due to the singularity issue experienced by conventional antennas discussed in Section III-B1, users are assumed to be excluded from a square area with its side being $a$ and its center at the origin."
+ ],
+ "image_footnote": [],
+ "bbox": [573, 65, 856, 234],
+ "page_idx": 3
+ },
+ {
+ "type": "image",
+ "img_path": "images/8c86d7c82be13cfdb700185fedbcfc043903bcacc188a31c96bd78c0d022b95d.jpg",
+ "image_caption": [
+ "(a) Conventional Antennas"
+ ],
+ "image_footnote": [],
+ "bbox": [570, 321, 839, 468],
+ "page_idx": 3
+ },
+ {
+ "type": "image",
+ "img_path": "images/1471639b0f119a70aea449f2c23ef35cea4d5de252869d12349b3c54fafbf1c3.jpg",
+ "image_caption": [
+ "(b) Pinching Antennas",
+ "Fig. 2. CRLBs achieved by the considered antenna systems. $N = 20$, $N_{\\mathrm{WG}} = 2$ and $d = 3 \\, \\mathrm{m}$. On each waveguide, there are $\\frac{N}{N_{\\mathrm{WG}}}$ antennas, which are equally spaced."
+ ],
+ "image_footnote": [],
+ "bbox": [573, 507, 836, 652],
+ "page_idx": 3
+ },
1050
+ {
+ "type": "text",
+ "text": "In Fig. 1, the averaged CRLBs achieved by the conventional and pinching-antenna systems are shown as functions of the number of antennas, where $\\mathrm{U}_m$ is assumed to be uniformly distributed in the service area. Because the conventional-antenna system suffers from the singularity issue discussed in Section III-B1, it is assumed that $\\mathrm{U}_m$ cannot be located in a square area with its side being $a$ and its center at the origin. As can be seen from Fig. 1, the use of pinching antennas yields a significant performance gain over conventional antennas, regardless of the choices of $N$ and $a$.",
+ "bbox": [501, 715, 921, 866],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "Fig. 2 is provided to highlight the fact that a user's positioning accuracy depends on its location. On the one hand, Fig. 2(a) shows that for conventional antennas, a user can experience extremely poor positioning accuracy if it is located far away from the center of the service area, which",
+ "bbox": [501, 869, 921, 945],
+ "page_idx": 3
+ },
+ {
+ "type": "page_number",
+ "text": "4",
+ "bbox": [911, 31, 919, 40],
+ "page_idx": 3
+ },
1083
+ {
+ "type": "image",
+ "img_path": "images/214063bb72dbd9a11d4eb4e79e473fdc96c3ca35a008baf61c81991492ed251a.jpg",
+ "image_caption": [
+ "(a) Positioning with a focal point at $\\left(-\\frac{D_{\\mathrm{L}}}{4},0,0\\right)$"
+ ],
+ "image_footnote": [],
+ "bbox": [145, 51, 411, 196],
+ "page_idx": 4
+ },
+ {
+ "type": "image",
+ "img_path": "images/5959a4483a3d708e1fda07cb15e0ba8ae7a2653fca446613aace2299d7205a6c.jpg",
+ "image_caption": [
+ "(b) Positioning with a focal point at $\\left(\\frac{D_{\\mathrm{L}}}{4},0,0\\right)$",
+ "Fig. 3. Using pinching antennas to achieve flexible user-centric positioning. $N = 20$, $N_{\\mathrm{WG}} = 2$ and $d = 3 \\mathrm{~m}$. On each waveguide, there are $\\frac{N}{N_{\\mathrm{WG}}}$ antennas, which are equally spaced in a segment with its length being $\\frac{D_{\\mathrm{L}}}{2}$ and its center at the focal points shown in the figures."
+ ],
+ "image_footnote": [],
+ "bbox": [143, 239, 406, 385],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "confirms the analytical results shown in (10). On the other hand, Fig. 2(b) shows that the use of pinching antennas ensures reasonably accurate positioning, regardless of whether the user is at the center or the edge of the service area. This also means that for the multi-user scenario, using pinching antennas can ensure fairness for the users' positioning accuracy. We note that in Fig. 2(b), local maxima are clearly visible in the proximity of the pinching antennas, which confirms the analysis shown in Section III-B3.",
+ "bbox": [73, 465, 490, 599],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "Recall that one key feature of pinching antennas is their reconfiguration capabilities, where the number and the locations of the antennas can be changed in a flexible manner. Fig. 3 demonstrates how this reconfiguration feature can be used to achieve flexible user-centric positioning. In particular, Figs. 3(a) and 3(b) show that by activating the pinching antennas close to the intended user locations, different focal points can be realized, which means that users close to these focal points can enjoy high positioning accuracy. For the case where the pinching antennas are clustered close to a user, Fig. 4 is provided to show the impact of the antenna spacing on the CRLB, where the accuracy of the analytical results developed in (16) is also verified.",
+ "bbox": [73, 601, 491, 796],
+ "page_idx": 4
+ },
1136
+ {
+ "type": "text",
+ "text": "V. CONCLUSIONS",
+ "text_level": 1,
+ "bbox": [217, 804, 346, 818],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "This letter investigated how the key features of pinching antennas can be used to support ISAC from the CRLB perspective. In particular, the CRLB achieved by pinching antennas was first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrated that the use of pinching antennas can significantly reduce CRLB and, hence, enhance the sensing capability. In addition, this letter showed that the low-cost and",
+ "bbox": [73, 823, 491, 946],
+ "page_idx": 4
+ },
+ {
+ "type": "image",
+ "img_path": "images/95e52804193e9fcc17868d063866468da9677490c21f4494ad8893fb6d18da17.jpg",
+ "image_caption": [
+ "Fig. 4. Impact of the antenna spacing on the CRLB. $N = 4$ pinching antennas are activated in a square-shape area with the antenna spacing being $\\Delta$ and $\\mathrm{U}_m$ located at the center of the area, where $N_{\\mathrm{WG}} = 2$. The analytical results are based on (16)."
+ ],
+ "image_footnote": [],
+ "bbox": [573, 40, 857, 210],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning.",
+ "bbox": [503, 268, 921, 299],
+ "page_idx": 4
+ },
1185
+ {
+ "type": "text",
+ "text": "REFERENCES",
+ "text_level": 1,
+ "bbox": [663, 306, 761, 318],
+ "page_idx": 4
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "[1] A. Fukuda, H. Yamamoto, H. Okazaki, Y. Suzuki, and K. Kawai, \"Pinching antenna - using a dielectric waveguide as an antenna,\" NTT DOCOMO Technical J., vol. 23, no. 3, pp. 5-12, Jan. 2022.",
+ "[2] Z. Ding, R. Schober, and H. V. Poor, \"Flexible-antenna systems: A pinching-antenna perspective,\" IEEE Trans. Commun., (to appear in 2025) Available on-line at arXiv:2412.02376.",
+ "[3] Z. Ding and H. V. Poor, \"LoS blockage in pinching-antenna systems: Curse or blessing?\" IEEE Wireless Commun. Lett., (submitted) Available on-line at arXiv:2503.08554.",
+ "[4] K. Wang, Z. Ding, and R. Schober, \"Antenna activation for NOMA assisted pinching-antenna systems,\" IEEE Commun. Lett., (to appear in 2025) Available on-line at arXiv:2412.13969.",
+ "[5] C. Ouyang, Z. Wang, Y. Liu, and Z. Ding, \"Array gain for pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2501.05657.",
+ "[6] Z. Wang, C. Ouyang, X. Mu, Y. Liu, and Z. Ding, \"Modeling and beamforming optimization for pinching-antenna systems,\" IEEE Trans. Wireless Commun., (submitted) Available on-line at arXiv:2502.05917.",
+ "[7] Y. Xu, Z. Ding, and G. Karagiannidis, \"Rate maximization for downlink pinching-antenna systems,\" IEEE Commun. Lett., (to appear in 2025) Available on-line at arXiv:2502.12629.",
+ "[8] X. Mu, G. Zhu, and Y. Liu, \"Pinching-antenna system (PASS)-enabled multicast communications,\" IEEE Trans. Commun., (submitted) Available on-line at arXiv:2502.16624.",
+ "[9] J. Xiao, J. Wang, and Y. Liu, \"Channel estimation for pinching-antenna systems (PASS),\" IEEE Trans. Commun., (submitted) Available on-line at arXiv:2503.13268.",
+ "[10] ——, \"Beam training for pinching-antenna systems (PASS),\" IEEE Trans. Wireless Commun., (submitted) Available on-line at arXiv:2502.05921.",
+ "[11] X. Xie, Y. Lu, and Z. Ding, \"Graph neural network enabled pinching antennas,\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2502.05447.",
+ "[12] J. Guo, Y. Liu, and A. Nallanathan, \"GPASS: Deep learning for beamforming in pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2502.01438.",
+ "[13] S. A. Tegos, P. D. Diamantoulakis, Z. Ding, and G. K. Karagiannidis, \"Minimum data rate maximization for uplink pinching-antenna systems,\" IEEE Wireless Commun. Lett., (to appear in 2025) Available on-line at arXiv:2412.13892.",
+ "[14] M. Sun, C. Ouyang, S. Wu, and Y. Liu, \"Physical layer security for pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2503.09075.",
+ "[15] Y. Qin, Y. Fu, and H. Zhang, \"Joint antenna position and transmit power optimization for pinching antenna-assisted ISAC systems,\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2503.12872.",
+ "[16] F. Liu, Y. Cui, C. Masouros, J. Xu, T. X. Han, Y. C. Eldar, and S. Buzzi, \"Integrated sensing and communications: Toward dual-functional wireless networks for 6G and beyond,\" IEEE J. Sel. Areas Commun., vol. 40, no. 6, pp. 1728-1767, 2022.",
+ "[17] T. Jia and R. M. Buehrer, \"A new Cramér-Rao lower bound for TOA-based localization,\" in Proc. Military Commun. Conf. (MILCOM 2008), Nov. 2008, pp. 1-5."
+ ],
+ "bbox": [506, 325, 921, 926],
+ "page_idx": 4
+ },
+ {
+ "type": "page_number",
+ "text": "5",
+ "bbox": [911, 30, 919, 40],
+ "page_idx": 4
+ }
+ ]
data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_model.json ADDED
@@ -0,0 +1,1442 @@
+ [
+ [
+ {
+ "type": "page_number",
+ "bbox": [0.912, 0.031, 0.921, 0.041],
+ "angle": 0,
+ "content": "1"
+ },
+ {
+ "type": "title",
+ "bbox": [0.125, 0.042, 0.873, 0.07],
+ "angle": 0,
+ "content": "Pinching-Antenna Assisted ISAC: A CRLB Perspective"
+ },
+ {
+ "type": "text",
+ "bbox": [0.391, 0.08, 0.6, 0.096],
+ "angle": 0,
+ "content": "Zhiguo Ding, Fellow, IEEE"
+ },
+ {
+ "type": "text",
+ "bbox": [0.075, 0.115, 0.493, 0.282],
+ "angle": 0,
+ "content": "Abstract—Recently, pinching antennas have attracted significant research interest due to their capability to reconfigure wireless channels as well as their array configuration flexibility. This letter focuses on how these features can be used to support integrated sensing and communications (ISAC) from the Cramér-Rao lower bound (CRLB) perspective. In particular, the CRLB achieved by pinching antennas is first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrate that using pinching antennas can significantly reduce CRLB and, hence, enhance positioning accuracy. In addition, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning."
+ },
+ {
+ "type": "text",
+ "bbox": [0.075, 0.284, 0.492, 0.323],
+ "angle": 0,
+ "content": "Index Terms—Pinching antennas, integrated sensing and communications (ISAC), Cramér-Rao lower bound (CRLB), estimation theory."
+ },
+ {
+ "type": "title",
+ "bbox": [0.217, 0.327, 0.35, 0.34],
+ "angle": 0,
+ "content": "I. INTRODUCTION"
+ },
+ {
+ "type": "text",
+ "bbox": [0.074, 0.347, 0.492, 0.542],
+ "angle": 0,
+ "content": "Recently, pinching antennas have received significant attention from both academia and industry as a novel evolution of smart antennas, and offer three distinguishing features [1], [2]. One is their capability to create strong line-of-sight (LoS) links between the transceivers, which means that large-scale path losses and LoS blockage can be effectively mitigated by activating antennas close to users [3]. The second feature is the reconfigurability of pinching-antenna systems, where the topology of a pinching-antenna array, e.g., the locations and the number of pinching antennas, can be flexibly adjusted. The third feature is their practicality, where DOCOMO's prototype shows that pinching antennas can be straightforwardly implemented in a low-cost manner [1]."
+ },
+ {
+ "type": "text",
+ "bbox": [0.074, 0.543, 0.493, 0.845],
+ "angle": 0,
+ "content": "In the literature, there already exists a large amount of work to demonstrate that the use of pinching antennas can significantly enhance the communication functionality of wireless networks. For example, the fundamental issues of pinching antennas, such as antenna activation, the architecture of a pinching-antenna array, and the array gains, have been investigated in [4]–[6]. Antenna placement is key to realizing the full potential of pinching-antenna systems, where various designs and their impact on the system throughput have been investigated in [7], [8]. Channel estimation and beam training are crucial issues for pinching-antenna systems, and sophisticated designs using the flexibility features of pinching antennas have been developed in [9], [10]. For many resource allocation problems encountered in pinching-antenna systems, the use of conventional convex optimization leads to high computational complexity, which motivates the application of advanced learning methods [11], [12]. The applications of pinching antennas to improve the uplink throughput and the security of communication networks have also been recently investigated in [13], [14]."
+ },
+ {
+ "type": "text",
+ "bbox": [0.074, 0.846, 0.492, 0.907],
+ "angle": 0,
+ "content": "However, we note that the impact of pinching antennas on the sensing functionality of wireless networks has not yet been fully characterized in the literature, although the recent work in [15] demonstrated the importance of pinching"
+ },
+ {
+ "type": "text",
+ "bbox": [0.075, 0.908, 0.492, 0.933],
+ "angle": 0,
+ "content": "Z. Ding is with the University of Manchester, Manchester, M1 9BB, UK, and Khalifa University, Abu Dhabi, UAE."
+ },
+ {
+ "type": "text",
+ "bbox": [0.503, 0.115, 0.925, 0.373],
+ "angle": 0,
+ "content": "antennas in integrated sensing and communication (ISAC) systems [16], which motivates this letter. In particular, in this letter, the Cramér-Rao lower bound (CRLB) is used as the performance metric to characterize the capability of pinching antennas for enhancing the positioning accuracy of ISAC networks. The CRLB achieved by pinching antennas is first derived in the letter, and then compared to conventional antennas. The presented analytical results reveal that the use of pinching antennas can ensure that users at different locations experience uniform positioning accuracy, whereas the use of conventional antennas can result in a significant disparity in accuracy among the users. In addition, the important properties of the CRLB achieved by pinching antennas, such as the effects of antenna placement and the local maxima of the CRLB, are also investigated. Furthermore, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning."
+ },
+ {
+ "type": "title",
+ "bbox": [0.641, 0.389, 0.787, 0.403],
+ "angle": 0,
+ "content": "II. SYSTEM MODEL"
+ },
+ {
+ "type": "text",
+ "bbox": [0.503, 0.413, 0.922, 0.565],
+ "angle": 0,
+ "content": "Consider a pinching-antenna system that is deployed to provide ISAC services to \\(M\\) single-antenna users, denoted by \\(\\mathrm{U}_m\\). Given the fact that there is already a rich literature on using pinching antennas to enhance communications, and also due to space limitations, the impact of pinching antennas on the sensing functionality is focused on in this letter. Without loss of generality, assume that \\(N\\) pinching antennas are activated on \\(N_{\\mathrm{WG}}\\) waveguides. The location of the \\(n\\)-th pinching antenna is denoted by \\(\\psi_n^{\\mathrm{Pin}} = (x_n^{\\mathrm{Pin}}, y_n^{\\mathrm{Pin}}, d_{\\mathrm{H}})\\), where \\(d_{\\mathrm{H}}\\) denotes the height of the waveguides."
+ },
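[Editor's note: the system-model block above fixes the geometry used throughout the extracted letter. As a rough end-to-end illustration that is not part of the source, the sketch below runs a maximum-likelihood grid search on the likelihood of the range model for one noisy snapshot; every coordinate and grid setting is an assumption.]

    import numpy as np

    rng = np.random.default_rng(4)
    K_E, d_H = 0.01, 3.0
    user = np.array([6.0, 2.0])                              # true location, assumed
    pins = np.array([[-10.0, -2.5], [0.0, -2.5], [10.0, -2.5],
                     [-10.0, 2.5], [0.0, 2.5], [10.0, 2.5]])

    d_true = np.sqrt(((pins - user) ** 2).sum(axis=1) + d_H ** 2)
    d_hat = d_true + rng.normal(0.0, np.sqrt(K_E) * d_true)  # one snapshot of (1)

    # grid-search ML estimate: maximize the log-likelihood over (x, y)
    xs = np.linspace(-20.0, 20.0, 401)
    ys = np.linspace(-5.0, 5.0, 101)
    X, Y = np.meshgrid(xs, ys)
    D = np.sqrt((X[..., None] - pins[:, 0]) ** 2
                + (Y[..., None] - pins[:, 1]) ** 2 + d_H ** 2)
    S2 = K_E * D ** 2
    LL = (-0.5 * np.log(S2) - (d_hat - D) ** 2 / (2 * S2)).sum(axis=-1)
    i, j = np.unravel_index(LL.argmax(), LL.shape)
    print("ML estimate:", X[i, j], Y[i, j], "true:", user)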
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.504,
150
+ 0.566,
151
+ 0.922,
152
+ 0.642
153
+ ],
154
+ "angle": 0,
155
+ "content": "The service area is denoted by \\(\\mathcal{A}\\) and is assumed to be a rectangle with its two sides denoted by \\(D_{\\mathrm{W}}\\) and \\(D_{\\mathrm{L}}\\), respectively, and its center located at \\((0,0,0)\\). The users are assumed to be uniformly distributed in \\(\\mathcal{A}\\), and \\(\\mathrm{U}_m\\)'s location is denoted by \\(\\psi_m = (x_m,y_m,0)\\)."
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.504,
161
+ 0.642,
162
+ 0.922,
163
+ 0.687
164
+ ],
165
+ "angle": 0,
166
+ "content": "Denote the distance from the \\(n\\)-th pinching antenna to the \\(m\\)-th user by \\(d_{mn}\\). Distance (range) estimates for the \\(m\\)-th user can be modeled as follows: [17]"
167
+ },
168
+ {
169
+ "type": "equation",
170
+ "bbox": [
171
+ 0.641,
172
+ 0.697,
173
+ 0.921,
174
+ 0.715
175
+ ],
176
+ "angle": 0,
177
+ "content": "\\[\n\\hat {d} _ {m n} = d _ {m n} + w _ {m n}, \\tag {1}\n\\]"
178
+ },
179
+ {
180
+ "type": "text",
181
+ "bbox": [
182
+ 0.504,
183
+ 0.728,
184
+ 0.922,
185
+ 0.78
186
+ ],
187
+ "angle": 0,
188
+ "content": "where \\( d_{mn} = \\sqrt{(x_m - x_n^{\\mathrm{Pin}})^2 + (y_m - y_n^{\\mathrm{Pin}})^2 + d_{\\mathrm{H}}^2} \\), and \\( w_{mn} \\) is a zero-mean Gaussian distributed noise term whose variance is distance-dependent, i.e.,"
189
+ },
190
+ {
191
+ "type": "equation",
192
+ "bbox": [
193
+ 0.525,
194
+ 0.788,
195
+ 0.921,
196
+ 0.813
197
+ ],
198
+ "angle": 0,
199
+ "content": "\\[\n\\sigma_ {m n} ^ {2} = K _ {E} \\left(\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right), \\tag {2}\n\\]"
200
+ },
201
+ {
202
+ "type": "text",
203
+ "bbox": [
204
+ 0.504,
205
+ 0.821,
206
+ 0.921,
207
+ 0.851
208
+ ],
209
+ "angle": 0,
210
+ "content": "\\(K_{E}\\) denotes a system parameter decided by the range estimation environment."
211
+ },
212
+ {
213
+ "type": "title",
214
+ "bbox": [
215
+ 0.516,
216
+ 0.872,
217
+ 0.908,
218
+ 0.886
219
+ ],
220
+ "angle": 0,
221
+ "content": "III. IMPACT OF PINCHING ANTENNAS ON POSITIONING"
222
+ },
223
+ {
224
+ "type": "title",
225
+ "bbox": [
226
+ 0.504,
227
+ 0.892,
228
+ 0.842,
229
+ 0.908
230
+ ],
231
+ "angle": 0,
232
+ "content": "A. CRLB Achieved by Pinching-Antenna Systems"
233
+ },
234
+ {
235
+ "type": "text",
236
+ "bbox": [
237
+ 0.504,
238
+ 0.915,
239
+ 0.922,
240
+ 0.946
241
+ ],
242
+ "angle": 0,
243
+ "content": "Without loss of generality, the impact of pinching antennas on \\(\\mathrm{U}_m\\) 's localization is focused on. The joint probability den"
244
+ },
245
+ {
246
+ "type": "aside_text",
247
+ "bbox": [
248
+ 0.023,
249
+ 0.256,
250
+ 0.059,
251
+ 0.678
252
+ ],
253
+ "angle": 270,
254
+ "content": "arXiv:2504.05792v1 [cs.IT] 8 Apr 2025"
255
+ }
256
+ ],
257
+ [
258
+ {
259
+ "type": "page_number",
260
+ "bbox": [
261
+ 0.912,
262
+ 0.031,
263
+ 0.921,
264
+ 0.041
265
+ ],
266
+ "angle": 0,
267
+ "content": "2"
268
+ },
269
+ {
270
+ "type": "text",
271
+ "bbox": [
272
+ 0.075,
273
+ 0.068,
274
+ 0.493,
275
+ 0.1
276
+ ],
277
+ "angle": 0,
278
+ "content": "sity function (pdf) of \\(\\hat{d}_{mn}\\) conditioned on \\(d_{mn}\\), \\(1\\leq n\\leq N\\), is given by"
279
+ },
280
+ {
281
+ "type": "equation",
282
+ "bbox": [
283
+ 0.103,
284
+ 0.106,
285
+ 0.493,
286
+ 0.148
287
+ ],
288
+ "angle": 0,
289
+ "content": "\\[\nf (\\hat {d} _ {m 1}, \\dots , \\hat {d} _ {m N}) = \\prod_ {n = 1} ^ {N} \\frac {1}{\\sqrt {2 \\pi \\sigma_ {m n} ^ {2}}} e ^ {- \\frac {(\\hat {d} _ {m n} - d _ {m n}) ^ {2}}{2 \\sigma_ {m n} ^ {2}}}, \\tag {3}\n\\]"
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.076,
295
+ 0.152,
296
+ 0.361,
297
+ 0.168
298
+ ],
299
+ "angle": 0,
300
+ "content": "whose log-likelihood function is given by"
301
+ },
302
+ {
303
+ "type": "equation",
304
+ "bbox": [
305
+ 0.143,
306
+ 0.174,
307
+ 0.49,
308
+ 0.245
309
+ ],
310
+ "angle": 0,
311
+ "content": "\\[\n\\begin{array}{l} L \\triangleq \\ln f (\\hat {d} _ {m 1}, \\dots , \\hat {d} _ {m N}) = - \\frac {N}{2} \\ln (2 \\pi) \\tag {4} \\\\ - \\sum_ {n = 1} ^ {N} \\ln \\sigma_ {m n} - \\sum_ {n = 1} ^ {N} \\frac {(\\hat {d} _ {m n} - d _ {m n}) ^ {2}}{2 \\sigma_ {m n} ^ {2}}. \\\\ \\end{array}\n\\]"
312
+ },
313
+ {
314
+ "type": "text",
315
+ "bbox": [
316
+ 0.076,
317
+ 0.25,
318
+ 0.415,
319
+ 0.266
320
+ ],
321
+ "angle": 0,
322
+ "content": "Recall that the CRLB for \\( x_{m} \\) and \\( y_{m} \\) is given by"
323
+ },
324
+ {
325
+ "type": "equation",
326
+ "bbox": [
327
+ 0.093,
328
+ 0.27,
329
+ 0.49,
330
+ 0.3
331
+ ],
332
+ "angle": 0,
333
+ "content": "\\[\n\\mathcal {E} \\left\\{\\left(\\hat {x} _ {m} - x _ {m}\\right) ^ {2} + \\left(\\hat {y} _ {m} - y _ {m}\\right) ^ {2} \\right\\} \\geq \\frac {1}{J _ {x} ^ {m}} + \\frac {1}{J _ {y} ^ {m}} \\triangleq \\mathrm {C R B} _ {m}, \\tag {5}\n\\]"
334
+ },
335
+ {
336
+ "type": "text",
337
+ "bbox": [
338
+ 0.076,
339
+ 0.305,
340
+ 0.49,
341
+ 0.341
342
+ ],
343
+ "angle": 0,
344
+ "content": "where \\(\\hat{x}_m\\) and \\(\\hat{y}_m\\) denote the estimates of \\(x_m\\) and \\(y_m\\), respectively, \\(J_x^m = \\mathcal{E}\\left\\{-\\frac{\\partial^2L}{\\partial x_m^2}\\right\\}\\) and \\(J_y^m = \\mathcal{E}\\left\\{-\\frac{\\partial^2L}{\\partial y_m^2}\\right\\}\\)."
345
+ },
346
+ {
347
+ "type": "text",
348
+ "bbox": [
349
+ 0.093,
350
+ 0.34,
351
+ 0.316,
352
+ 0.359
353
+ ],
354
+ "angle": 0,
355
+ "content": "\\(\\frac{\\partial L}{\\partial x_m}\\) can be obtained as follows:"
356
+ },
357
+ {
358
+ "type": "equation",
359
+ "bbox": [
360
+ 0.092,
361
+ 0.365,
362
+ 0.49,
363
+ 0.449
364
+ ],
365
+ "angle": 0,
366
+ "content": "\\[\n\\begin{array}{l} \\frac {\\partial L}{\\partial x _ {m}} = - \\sum_ {n = 1} ^ {N} \\frac {1}{\\sigma_ {m n}} \\frac {\\partial \\sigma_ {m n}}{\\partial x _ {m}} - \\sum_ {n = 1} ^ {N} \\frac {\\left(d _ {m n} - \\hat {d} _ {m n}\\right)}{\\sigma_ {m n} ^ {2}} \\frac {\\partial d _ {m n}}{\\partial x _ {m}} \\tag {6} \\\\ + \\sum_ {n = 1} ^ {N} \\frac {(\\hat {d} _ {m n} - d _ {m n}) ^ {2}}{\\sigma_ {m n} ^ {3}} \\frac {\\partial \\sigma_ {m n}}{\\partial x _ {m}}. \\\\ \\end{array}\n\\]"
367
+ },
368
+ {
369
+ "type": "text",
370
+ "bbox": [
371
+ 0.075,
372
+ 0.456,
373
+ 0.492,
374
+ 0.522
375
+ ],
376
+ "angle": 0,
377
+ "content": "The expression of \\(\\frac{\\partial^2L}{\\partial x_m^2}\\) is quite invoked; however, by using the fact that \\(\\mathcal{E}\\{\\hat{d}_{mn} - d_{mn}\\} = 0\\) and following the steps similar to those in [17], the expectation of \\(\\frac{\\partial^2L}{\\partial x_m^2}\\), i.e., \\(J_x^m\\), can be obtained as follows:"
378
+ },
379
+ {
380
+ "type": "equation",
381
+ "bbox": [
382
+ 0.082,
383
+ 0.527,
384
+ 0.49,
385
+ 0.582
386
+ ],
387
+ "angle": 0,
388
+ "content": "\\[\nJ _ {x} ^ {m} = \\sum_ {n = 1} ^ {N} \\frac {\\left(2 K _ {E} + 1\\right)}{\\sigma_ {m n} ^ {2}} \\frac {\\left(x _ {m} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2}}{\\left(x _ {m} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}}. \\tag {7}\n\\]"
389
+ },
390
+ {
391
+ "type": "text",
392
+ "bbox": [
393
+ 0.075,
394
+ 0.589,
395
+ 0.493,
396
+ 0.634
397
+ ],
398
+ "angle": 0,
399
+ "content": "\\(J_{y}^{m}\\) can be obtained in a similar form, which means that the CRLB for estimating \\(\\mathrm{U}_m\\) 's location can be expressed as follows:"
400
+ },
401
+ {
402
+ "type": "equation",
403
+ "bbox": [
404
+ 0.077,
405
+ 0.638,
406
+ 0.49,
407
+ 0.753
408
+ ],
409
+ "angle": 0,
410
+ "content": "\\[\n\\begin{array}{l} \\mathrm {C R B} _ {m} = \\frac {K _ {E}}{(2 K _ {E} + 1)} \\left(\\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2}}{\\left(\\left(x _ {m} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}} \\right. \\\\ \\left. + \\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(y _ {m} - y _ {n} ^ {\\operatorname* {P i n}}\\right) ^ {2}}{\\left(\\left(x _ {m} - x _ {n} ^ {\\operatorname* {P i n}}\\right) ^ {2} + \\left(y _ {m} - y _ {n} ^ {\\operatorname* {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}}\\right). \\tag {8} \\\\ \\end{array}\n\\]"
411
+ },
412
+ {
413
+ "type": "title",
414
+ "bbox": [
415
+ 0.076,
416
+ 0.771,
417
+ 0.361,
418
+ 0.786
419
+ ],
420
+ "angle": 0,
421
+ "content": "B. Performance Analysis Based on CRLB"
422
+ },
423
+ {
424
+ "type": "text",
425
+ "bbox": [
426
+ 0.075,
427
+ 0.788,
428
+ 0.493,
429
+ 0.901
430
+ ],
431
+ "angle": 0,
432
+ "content": "1) Performance Gain over Conventional Antennas: For the conventional-antenna benchmark, consider the use of a circular antenna array with its center located at \\((0,0,0)\\) and its radius being \\(\\frac{\\lambda}{4\\sin\\left(\\frac{\\pi}{N}\\right)}\\), which ensures that the minimal pairwise distance of the antennas is \\(\\frac{\\lambda}{2}\\), where \\(\\lambda\\) denotes the wavelength. By using the fact that the users are uniformly distributed within the service area, the performance gain of pinching antennas over conventional antennas can be evaluated as follows:"
433
+ },
434
+ {
435
+ "type": "equation",
436
+ "bbox": [
437
+ 0.093,
438
+ 0.904,
439
+ 0.49,
440
+ 0.944
441
+ ],
442
+ "angle": 0,
443
+ "content": "\\[\n\\Delta_ {\\mathrm {C R B}} = \\int_ {- \\frac {D _ {\\mathrm {L}}}{2}} ^ {\\frac {D _ {\\mathrm {L}}}{2}} \\int_ {- \\frac {D _ {\\mathrm {W}}}{2}} ^ {\\frac {D _ {\\mathrm {W}}}{2}} \\left(\\mathrm {C R B} _ {m} - \\mathrm {C R B} _ {m} ^ {\\text {C o n v}}\\right) \\frac {d y _ {m}}{D _ {\\mathrm {W}}} \\frac {d x _ {m}}{D _ {\\mathrm {L}}}, \\tag {9}\n\\]"
444
+ },
445
+ {
446
+ "type": "text",
447
+ "bbox": [
448
+ 0.503,
449
+ 0.068,
450
+ 0.923,
451
+ 0.221
452
+ ],
453
+ "angle": 0,
454
+ "content": "where \\(\\mathrm{CRB}_m^{\\mathrm{Conv}}\\) can be obtained similarly to \\(\\mathrm{CRB}_m\\) by replacing the locations of the pinching antennas with those of the conventional antennas. The performance gain in (9) can be straightforwardly evaluated via computer simulations, but a closed-form expression of \\(\\Delta_{\\mathrm{CRB}}\\) is difficult to obtain due to the factional expression of the CRLB. We note that the performance gain of pinching antennas over conventional antennas can also be illustrated by simply focusing on the user which is located at \\(\\left(\\frac{D_{\\mathrm{L}}}{2},0,0\\right)\\). The use of conventional antennas can achieve the following CRLB:"
455
+ },
456
+ {
457
+ "type": "equation",
458
+ "bbox": [
459
+ 0.527,
460
+ 0.228,
461
+ 0.921,
462
+ 0.469
463
+ ],
464
+ "angle": 0,
465
+ "content": "\\[\n\\begin{array}{l} \\mathrm {C R B} _ {m} ^ {\\mathrm {C o n v}} = \\left(\\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {C o n v}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {C o n v}}\\right) ^ {2} + (y _ {n} ^ {\\mathrm {C o n v}}) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}} \\right. \\\\ \\left. + \\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(y _ {n} ^ {\\text {C o n v}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\text {C o n v}}\\right) ^ {2} + \\left(y _ {n} ^ {\\text {C o n v}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}}\\right) \\frac {K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\\\ \\stackrel {(a)} {\\approx} \\frac {K _ {E}}{(2 K _ {E} + 1)} \\left(\\frac {4 \\left(\\frac {D _ {\\mathrm {L}} ^ {2}}{4} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{N D _ {\\mathrm {L}} ^ {2}} + \\frac {\\left(\\frac {D _ {\\mathrm {L}} ^ {2}}{4} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{\\sum_ {n = 1} ^ {N} (y _ {n} ^ {\\mathrm {C o n v}}) ^ {2}}\\right) \\\\ \\xrightarrow {(b)} \\infty , \\tag {10} \\\\ \\end{array}\n\\]"
466
+ },
467
+ {
468
+ "type": "text",
469
+ "bbox": [
470
+ 0.503,
471
+ 0.479,
472
+ 0.921,
473
+ 0.554
474
+ ],
475
+ "angle": 0,
476
+ "content": "where step (a) is due to the fact that the conventional antennas are clustered close to the center of the service area, and step (b) is due to the fact that \\( |y_{n}^{\\mathrm{Conv}}| \\to 0 \\) for conventional antennas, particularly for the case with high carrier frequencies (i.e., small wavelengths)."
477
+ },
478
+ {
479
+ "type": "text",
480
+ "bbox": [
481
+ 0.504,
482
+ 0.555,
483
+ 0.922,
484
+ 0.616
485
+ ],
486
+ "angle": 0,
487
+ "content": "On the other hand, pinching antennas do not suffer the singularity issue experienced by conventional antennas. For example, for the user located at \\(\\left(\\frac{D_{\\mathrm{L}}}{2},0,0\\right)\\), the corresponding CRLB can be expressed as follows:"
488
+ },
489
+ {
490
+ "type": "equation",
491
+ "bbox": [
492
+ 0.512,
493
+ 0.624,
494
+ 0.921,
495
+ 0.784
496
+ ],
497
+ "angle": 0,
498
+ "content": "\\[\n\\begin{array}{l} \\mathrm {C R B} _ {m} = \\frac {K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + (y _ {n} ^ {\\mathrm {P i n}}) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}} \\right. \\\\ \\left. + \\frac {1}{\\sum_ {n = 1} ^ {N} \\frac {\\left(y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2}}{\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + \\left(y _ {n} ^ {\\mathrm {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}}\\right). \\tag {11} \\\\ \\end{array}\n\\]"
499
+ },
500
+ {
501
+ "type": "text",
502
+ "bbox": [
503
+ 0.504,
504
+ 0.79,
505
+ 0.921,
506
+ 0.822
507
+ ],
508
+ "angle": 0,
509
+ "content": "For illustrative purposes, a simple upper bound on the CRLB achieved by pinching antennas can be obtained as follows:"
510
+ },
511
+ {
512
+ "type": "equation",
513
+ "bbox": [
514
+ 0.512,
515
+ 0.829,
516
+ 0.921,
517
+ 0.942
518
+ ],
519
+ "angle": 0,
520
+ "content": "\\[\n\\begin{array}{l} \\mathrm {C R B} _ {m} \\leq \\frac {K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + \\left(y _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\operatorname {P i n}}\\right) ^ {2}} \\right. \\\\ \\left. + \\frac {\\left(\\left(\\frac {D _ {\\mathrm {L}}}{2} - x _ {n} ^ {\\text {P i n}}\\right) ^ {2} + \\left(y _ {n} ^ {\\text {P i n}}\\right) ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}{\\left(y _ {n} ^ {\\text {P i n}}\\right) ^ {2}}\\right), \\tag {12} \\\\ \\end{array}\n\\]"
521
+ }
522
+ ],
523
+ [
524
+ {
525
+ "type": "page_number",
526
+ "bbox": [
527
+ 0.912,
528
+ 0.031,
529
+ 0.921,
530
+ 0.041
531
+ ],
532
+ "angle": 0,
533
+ "content": "3"
534
+ },
535
+ {
536
+ "type": "text",
537
+ "bbox": [
538
+ 0.074,
539
+ 0.069,
540
+ 0.493,
541
+ 0.146
542
+ ],
543
+ "angle": 0,
544
+ "content": "where \\( n \\) is an arbitrary integer between 1 and \\( N \\). Because of the diverse locations of the \\( N \\) pinching antennas, it is always possible to find \\( n \\in \\{1, \\dots, N\\} \\) which yields a finite value for the upper bound shown in (12), i.e., the CRLB achieved by pinching antennas is always bounded."
545
+ },
546
+ {
547
+ "type": "text",
548
+ "bbox": [
549
+ 0.074,
550
+ 0.146,
551
+ 0.493,
552
+ 0.206
553
+ ],
554
+ "angle": 0,
555
+ "content": "Remark 1: Unlike conventional antennas which can cause noticeable accuracy variations between users, the carried-out case study shows that pinching antennas have the ability to offer uniform positioning accuracy between the users."
556
+ },
557
+ {
558
+ "type": "text",
559
+ "bbox": [
560
+ 0.074,
561
+ 0.206,
562
+ 0.493,
563
+ 0.373
564
+ ],
565
+ "angle": 0,
566
+ "content": "2) Flexible User-Centric Positioning: Due to their low-cost and reconfigurability features, the locations of pinching antennas can be tailored to a serving user for realizing flexible user-centric positioning. To facilitate the performance analysis, the association between the pinching antennas and the waveguides is required. Without loss of generality, assume that there are \\(\\tilde{N} = \\frac{N}{N_{\\mathrm{WG}}}\\) pinching antennas on each waveguide. Denote the location of the \\(n\\) -th antenna on the \\(i\\) -th waveguide by \\(\\psi_{in}^{\\mathrm{Pin}} = (x_{in}^{\\mathrm{Pin}},y_{in}^{\\mathrm{Pin}},d_{\\mathrm{H}})\\). Furthermore, assume that the antennas are equally spaced, and define \\(\\Delta_x = |x_{in}^{\\mathrm{Pin}} - x_{im}^{\\mathrm{Pin}}|\\) and \\(\\Delta_y = |x_{in}^{\\mathrm{Pin}} - x_{jn}^{\\mathrm{Pin}}|\\), \\(m\\neq n\\) and \\(i\\neq j\\)."
567
+ },
568
+ {
569
+ "type": "text",
570
+ "bbox": [
571
+ 0.075,
572
+ 0.372,
573
+ 0.493,
574
+ 0.477
575
+ ],
576
+ "angle": 0,
577
+ "content": "For illustrative purposes, assume that all \\(N\\) pinching antennas are activated in a square area with \\(\\mathrm{U}_m\\) at its center, where \\(\\tilde{N} = N_{\\mathrm{WG}}\\) and \\(\\Delta_x = \\Delta_y = \\Delta\\). This assumption is made to facilitate the performance analysis, and more practical setups will be considered in the simulation section. Define \\(\\bar{N} = \\frac{\\tilde{N}}{2}\\), and without loss of generality, assume that \\(\\bar{N}\\) is an even number."
578
+ },
579
+ {
580
+ "type": "text",
581
+ "bbox": [
582
+ 0.075,
583
+ 0.479,
584
+ 0.492,
585
+ 0.509
586
+ ],
587
+ "angle": 0,
588
+ "content": "With these assumptions, the CRLB in (8) can be simplified as follows:"
589
+ },
590
+ {
591
+ "type": "equation",
592
+ "bbox": [
593
+ 0.096,
594
+ 0.516,
595
+ 0.473,
596
+ 0.568
597
+ ],
598
+ "angle": 0,
599
+ "content": "\\[\n\\mathrm {C R B} _ {m} = \\frac {\\frac {K _ {E} \\Delta^ {2}}{4 (2 K _ {E} + 1)}}{\\sum_ {i = 1} ^ {\\bar {N}} \\sum_ {n = 1} ^ {\\bar {N}} \\frac {(n - \\frac {1}{2}) ^ {2}}{\\beta_ {n i} ^ {2}}} + \\frac {\\frac {K _ {E} \\Delta^ {2}}{4 (2 K _ {E} + 1)}}{\\sum_ {i = 1} ^ {\\bar {N}} \\sum_ {n = 1} ^ {\\bar {N}} \\frac {(i - \\frac {1}{2}) ^ {2}}{\\beta_ {n i} ^ {2}}},\n\\]"
600
+ },
601
+ {
602
+ "type": "text",
603
+ "bbox": [
604
+ 0.074,
605
+ 0.578,
606
+ 0.493,
607
+ 0.764
608
+ ],
609
+ "angle": 0,
610
+ "content": "where \\( = \\left(n - \\frac{1}{2}\\right)^{2} + \\left(i - \\frac{1}{2}\\right)^{2} + \\frac{d_{\\mathrm{H}}^{2}}{\\Delta^{2}} \\). The above CRLB can be used to design the antenna placement, i.e., the optimal choice of \\( \\Delta \\) for minimizing the CRLB. Computer simulations can be used to verify that \\( \\frac{\\partial^2\\mathrm{CRB}_m}{\\partial\\Delta^2} > 0 \\), i.e., \\( \\mathrm{CRB}_m \\) is a convex function of \\( \\Delta \\), and hence convex optimization solvers can be used to find the optimal solution of \\( \\Delta \\) efficiently. To obtain an insightful understanding of the optimal choice of \\( \\Delta \\), a special case with \\( N = 4 \\) is focused on in the following. We note that this special case is important in practice, given the fact that using a small number of antennas is helpful in reducing system overhead. For the case with \\( N = 4 \\), the CRLB can be simplified as follows:"
611
+ },
612
+ {
613
+ "type": "equation",
614
+ "bbox": [
615
+ 0.157,
616
+ 0.773,
617
+ 0.493,
618
+ 0.81
619
+ ],
620
+ "angle": 0,
621
+ "content": "\\[\n\\mathrm {C R B} _ {m} = \\frac {2 K _ {E} \\Delta^ {2}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {1}{2} + \\frac {d _ {\\mathrm {H}} ^ {2}}{\\Delta^ {2}}\\right) ^ {2}, \\tag {13}\n\\]"
622
+ },
623
+ {
624
+ "type": "text",
625
+ "bbox": [
626
+ 0.075,
627
+ 0.817,
628
+ 0.345,
629
+ 0.833
630
+ ],
631
+ "angle": 0,
632
+ "content": "whose first-order derivative is given by"
633
+ },
634
+ {
635
+ "type": "equation",
636
+ "bbox": [
637
+ 0.095,
638
+ 0.842,
639
+ 0.493,
640
+ 0.877
641
+ ],
642
+ "angle": 0,
643
+ "content": "\\[\n\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial \\Delta} = \\frac {4 K _ {E}}{(2 K _ {E} + 1)} \\left(\\frac {1}{2} \\Delta + \\frac {d _ {\\mathrm {H}} ^ {2}}{\\Delta}\\right) \\left(\\frac {1}{2} - \\frac {d _ {\\mathrm {H}} ^ {2}}{\\Delta^ {2}}\\right). \\tag {14}\n\\]"
644
+ },
645
+ {
646
+ "type": "text",
647
+ "bbox": [
648
+ 0.075,
649
+ 0.884,
650
+ 0.421,
651
+ 0.9
652
+ ],
653
+ "angle": 0,
654
+ "content": "The second-order derivative of \\(\\mathrm{CRB}_m\\) is given by"
655
+ },
656
+ {
657
+ "type": "equation",
658
+ "bbox": [
659
+ 0.137,
660
+ 0.908,
661
+ 0.493,
662
+ 0.943
663
+ ],
664
+ "angle": 0,
665
+ "content": "\\[\n\\frac {\\partial^ {2} \\mathrm {C R B} _ {m}}{\\partial \\Delta^ {2}} = \\frac {4 K _ {E}}{\\left(2 K _ {E} + 1\\right)} \\left(\\frac {1}{4} + 3 \\frac {d _ {\\mathrm {H}} ^ {4}}{\\Delta^ {4}}\\right) > 0, \\tag {15}\n\\]"
666
+ },
667
+ {
668
+ "type": "text",
669
+ "bbox": [
670
+ 0.504,
671
+ 0.069,
672
+ 0.923,
673
+ 0.115
674
+ ],
675
+ "angle": 0,
676
+ "content": "which means that \\(\\mathrm{CRB}_m\\) is a convex function of \\(\\Delta\\). Therefore, the optimal solution of \\(\\Delta\\) for minimizing the CRLB for the special case with \\(N = 4\\) is given by"
677
+ },
678
+ {
679
+ "type": "equation",
680
+ "bbox": [
681
+ 0.666,
682
+ 0.125,
683
+ 0.922,
684
+ 0.143
685
+ ],
686
+ "angle": 0,
687
+ "content": "\\[\n\\Delta^ {*} = \\sqrt {2} d _ {H}. \\tag {16}\n\\]"
688
+ },
689
+ {
690
+ "type": "text",
691
+ "bbox": [
692
+ 0.504,
693
+ 0.154,
694
+ 0.922,
695
+ 0.229
696
+ ],
697
+ "angle": 0,
698
+ "content": "Remark 2: An intuition is that the CRLB is minimized if all the antennas are placed as close to the user as possible, i.e., \\(\\Delta^{*} \\to 0\\) (or \\(\\frac{\\lambda}{2}\\) to avoid antenna coupling). (16) shows that this intuition is wrong, where the optimal antenna spacing is a function of the height of the waveguides."
699
+ },
700
+ {
701
+ "type": "text",
702
+ "bbox": [
703
+ 0.504,
704
+ 0.229,
705
+ 0.922,
706
+ 0.31
707
+ ],
708
+ "angle": 0,
709
+ "content": "3) Local-Maximum Property of CRLB: In the proximity of each pinching antenna, \\(\\psi_{n}^{\\mathrm{Pin}}\\), there exists a local maximum of \\(\\mathrm{CRB}_m\\) shown in (8). This local-maximum property can be revealed by studying \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m}\\) and \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial y_m}\\). Without loss of generality, \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m}\\) is focused, and can be expressed as follows:"
710
+ },
711
+ {
712
+ "type": "equation",
713
+ "bbox": [
714
+ 0.526,
715
+ 0.318,
716
+ 0.922,
717
+ 0.353
718
+ ],
719
+ "angle": 0,
720
+ "content": "\\[\n\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} = \\frac {K _ {E}}{(2 K _ {E} + 1)} \\left(- \\frac {1}{\\gamma_ {1} ^ {2}} [ \\gamma_ {2} - \\gamma_ {3} ] + \\frac {1}{\\gamma_ {4} ^ {2}} \\gamma_ {5}\\right), \\tag {17}\n\\]"
721
+ },
722
+ {
723
+ "type": "text",
724
+ "bbox": [
725
+ 0.504,
726
+ 0.362,
727
+ 0.922,
728
+ 0.453
729
+ ],
730
+ "angle": 0,
731
+ "content": "where \\(d_{mn}^2 = \\left(x_m - x_n^{\\mathrm{Pin}}\\right)^2 +\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2 +d_{\\mathrm{H}}^2,\\) \n\\(\\gamma_{1} = \\sum_{n = 1}^{N}\\frac{\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)^{2}}{d_{mn}^{4}},\\gamma_{2} = \\sum_{n = 1}^{N}\\frac{2\\left(x_{m} - x_{n}^{\\mathrm{Pin}}\\right)}{d_{mn}^{4}},\\gamma_{3} =\\) \n\\(\\begin{array}{r}\\sum_{n = 1}^{N}\\frac{4\\left(x_m - x_n^{\\mathrm{Pin}}\\right)^3}{d_{mn}^6},\\gamma_4 = \\sum_{n = 1}^{N}\\frac{\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2}{d_{mn}^2},\\mathrm{and}\\gamma_5 = \\end{array}\\) \n\\(\\begin{array}{r}\\sum_{n = 1}^{N}\\frac{4\\left(x_m - x_n^{\\mathrm{Pin}}\\right)\\left(y_m - y_n^{\\mathrm{Pin}}\\right)^2}{d_{mn}^6}. \\end{array}\\)"
732
+ },
733
+ {
734
+ "type": "text",
735
+ "bbox": [
736
+ 0.504,
737
+ 0.453,
738
+ 0.922,
739
+ 0.526
740
+ ],
741
+ "angle": 0,
742
+ "content": "Without loss of generality, assume that \\(\\mathrm{U}_m\\) is in the proximity of the first pinching antenna on the first waveguide, i.e., \\(x_{m} = x_{11}^{\\mathrm{Pin}} + \\delta_{x}\\) and \\(y_{m} = y_{11}^{\\mathrm{Pin}} + \\delta_{y}\\), where \\(\\delta_x\\to 0\\) and \\(\\delta_y\\rightarrow 0\\). In this case, \\(\\gamma_{1}\\) in (17) can be approximated as follows:"
743
+ },
744
+ {
745
+ "type": "equation",
746
+ "bbox": [
747
+ 0.561,
748
+ 0.535,
749
+ 0.922,
750
+ 0.582
751
+ ],
752
+ "angle": 0,
753
+ "content": "\\[\n\\gamma_ {1} \\approx \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {n ^ {2} \\Delta_ {x} ^ {2}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}, \\tag {18}\n\\]"
754
+ },
755
+ {
756
+ "type": "text",
757
+ "bbox": [
758
+ 0.504,
759
+ 0.592,
760
+ 0.921,
761
+ 0.624
762
+ ],
763
+ "angle": 0,
764
+ "content": "where the terms at the order of \\(\\delta_x^2\\) are omitted. Similarly, by omitting the terms of \\(\\delta_x^2\\), \\(\\gamma_2\\) can be approximated as follows:"
765
+ },
766
+ {
767
+ "type": "equation",
768
+ "bbox": [
769
+ 0.553,
770
+ 0.633,
771
+ 0.921,
772
+ 0.729
773
+ ],
774
+ "angle": 0,
775
+ "content": "\\[\n\\begin{array}{l} \\gamma_ {2} \\approx \\sum_ {i = 1} ^ {\\frac {N}{N}} \\frac {2 \\delta_ {x}}{\\left(\\delta^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}} \\tag {19} \\\\ - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {2 n \\Delta_ {x}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}. \\\\ \\end{array}\n\\]"
776
+ },
777
+ {
778
+ "type": "text",
779
+ "bbox": [
780
+ 0.505,
781
+ 0.737,
782
+ 0.895,
783
+ 0.753
784
+ ],
785
+ "angle": 0,
786
+ "content": "Similarly, \\(\\gamma_3, \\gamma_4\\) and \\(\\gamma_5\\) can be approximated as follows:"
787
+ },
788
+ {
789
+ "type": "equation",
790
+ "bbox": [
791
+ 0.54,
792
+ 0.762,
793
+ 0.921,
794
+ 0.809
795
+ ],
796
+ "angle": 0,
797
+ "content": "\\[\n\\gamma_ {3} \\approx - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {4 n ^ {3} \\Delta_ {x} ^ {3}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + (i - 1) ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {3}}, \\tag {20}\n\\]"
798
+ },
799
+ {
800
+ "type": "equation",
801
+ "bbox": [
802
+ 0.54,
803
+ 0.811,
804
+ 0.921,
805
+ 0.858
806
+ ],
807
+ "angle": 0,
808
+ "content": "\\[\n\\gamma_ {4} \\approx \\sum_ {i = 1} ^ {\\frac {N}{N} - 1} \\sum_ {n = 1} ^ {\\tilde {N}} \\frac {i ^ {2} \\Delta_ {y} ^ {2}}{\\left((n - 1) ^ {2} \\Delta_ {x} ^ {2} + i ^ {2} \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {2}}, \\tag {21}\n\\]"
809
+ },
810
+ {
811
+ "type": "equation",
812
+ "bbox": [
813
+ 0.54,
814
+ 0.86,
815
+ 0.921,
816
+ 0.906
817
+ ],
818
+ "angle": 0,
819
+ "content": "\\[\n\\gamma_ {5} \\approx - \\sum_ {i = 1} ^ {\\frac {N}{N} - 1} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {n \\Delta_ {x} i ^ {2} \\Delta_ {y} ^ {2}}{\\left(n ^ {2} \\Delta_ {x} ^ {2} + i \\Delta_ {y} ^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {3}}. \\tag {22}\n\\]"
820
+ },
821
+ {
822
+ "type": "text",
823
+ "bbox": [
824
+ 0.504,
825
+ 0.915,
826
+ 0.922,
827
+ 0.949
828
+ ],
829
+ "angle": 0,
830
+ "content": "To facilitate the analysis of this local-maximum property of CRLB, assume that \\(\\Delta_x = \\Delta_y = \\Delta \\gg d_{\\mathrm{H}}\\) and \\(\\tilde{N} = \\frac{N}{\\tilde{N}}\\), which"
831
+ }
832
+ ],
833
+ [
834
+ {
835
+ "type": "page_number",
836
+ "bbox": [
837
+ 0.912,
838
+ 0.032,
839
+ 0.921,
840
+ 0.041
841
+ ],
842
+ "angle": 0,
843
+ "content": "4"
844
+ },
845
+ {
846
+ "type": "text",
847
+ "bbox": [
848
+ 0.074,
849
+ 0.07,
850
+ 0.493,
851
+ 0.099
852
+ ],
853
+ "angle": 0,
854
+ "content": "means that \\(\\gamma_{1} = \\gamma_{3}\\), and hence the CRLB can be simplified as follows:"
855
+ },
856
+ {
857
+ "type": "equation",
858
+ "bbox": [
859
+ 0.085,
860
+ 0.102,
861
+ 0.482,
862
+ 0.203
863
+ ],
864
+ "angle": 0,
865
+ "content": "\\[\n\\begin{array}{l} \\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\left[ - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\frac {2 \\delta_ {x}}{(\\delta^ {2} + (i - 1) ^ {2} \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}) ^ {2}} \\right. \\\\ \\left. + \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {2 n \\Delta}{\\bar {\\beta} _ {n i} ^ {2}} - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {4 n ^ {3} \\Delta^ {3}}{\\bar {\\beta} _ {n i} ^ {3}} - \\sum_ {i = 1} ^ {\\frac {N}{N} - 1} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {4 n ^ {3} \\Delta^ {3}}{\\bar {\\beta} _ {n i} ^ {3}} \\right], \\\\ \\end{array}\n\\]"
866
+ },
867
+ {
868
+ "type": "text",
869
+ "bbox": [
870
+ 0.075,
871
+ 0.205,
872
+ 0.339,
873
+ 0.223
874
+ ],
875
+ "angle": 0,
876
+ "content": "where \\(\\bar{\\beta}_{ni} = (n^2 + (i - 1)^2)\\Delta^2 + d_{\\mathrm{H}}^2\\)"
877
+ },
878
+ {
879
+ "type": "text",
880
+ "bbox": [
881
+ 0.074,
882
+ 0.222,
883
+ 0.492,
884
+ 0.275
885
+ ],
886
+ "angle": 0,
887
+ "content": "Note that if \\( i = \\frac{N}{N} \\), \\( \\sum_{n=1}^{\\tilde{N}-1} \\frac{4n^3\\Delta^3}{\\left((n^2+(i-1)^2)\\Delta^2+d_{\\mathrm{H}}^2\\right)^3} \\) is an insignificant term, which means that the CRLB can be further simplified as follows:"
888
+ },
889
+ {
890
+ "type": "equation",
891
+ "bbox": [
892
+ 0.085,
893
+ 0.279,
894
+ 0.491,
895
+ 0.381
896
+ ],
897
+ "angle": 0,
898
+ "content": "\\[\n\\begin{array}{l} \\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\left[ - \\sum_ {i = 1} ^ {\\frac {N}{N}} \\frac {2 \\delta_ {x}}{(\\delta^ {2} + (i - 1) ^ {2} \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}) ^ {2}} \\right. \\\\ \\left. + 2 \\Delta \\sum_ {i = 1} ^ {\\frac {N}{N}} \\sum_ {n = 1} ^ {\\tilde {N} - 1} \\frac {n \\left(\\left((i - 1) ^ {2} - 3 n ^ {2}\\right) \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}\\right)}{\\left(\\left(n ^ {2} + (i - 1) ^ {2}\\right) \\Delta^ {2} + d _ {\\mathrm {H}} ^ {2}\\right) ^ {3}} \\right]. \\tag {23} \\\\ \\end{array}\n\\]"
899
+ },
900
+ {
901
+ "type": "text",
902
+ "bbox": [
903
+ 0.074,
904
+ 0.383,
905
+ 0.492,
906
+ 0.442
907
+ ],
908
+ "angle": 0,
909
+ "content": "For the case with \\(\\delta_x = 0\\), i.e., the user is located right underneath of the pinching antenna at \\(\\psi_{11}^{\\mathrm{Pin}}\\), by using the assumption that \\(\\Delta \\gg d\\), the CRLB can be expressed as follows:"
910
+ },
911
+ {
912
+ "type": "equation",
913
+ "bbox": [
914
+ 0.18,
915
+ 0.443,
916
+ 0.49,
917
+ 0.477
918
+ ],
919
+ "angle": 0,
920
+ "content": "\\[\n\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\frac {2}{\\Delta^ {3}} \\gamma_ {6}, \\tag {24}\n\\]"
921
+ },
922
+ {
923
+ "type": "text",
924
+ "bbox": [
925
+ 0.075,
926
+ 0.481,
927
+ 0.493,
928
+ 0.593
929
+ ],
930
+ "angle": 0,
931
+ "content": "where \\(\\gamma_6 = \\sum_{i=1}^{N} \\sum_{n=1}^{\\tilde{N}-1} \\frac{(i-1)^2 - 3n^2}{(n^2 + (i-1)^2)^3}\\). We note that the terms of \\(\\gamma_6\\) decay rapidly by increasing \\(n\\) and \\(i\\), i.e., \\(\\gamma_6\\) can be approximated by keeping the dominant negative term (\\(n = 1\\) and \\(i = 1\\)) and the dominant positive term (\\(n = 1\\) and \\(i = 3\\)), i.e., \\(\\gamma_6 \\approx -3 + \\frac{1}{125}\\), which means \\(\\frac{\\partial \\mathrm{CRB}_m}{\\partial x_m} \\leq 0\\) for the case with \\(\\delta_x = 0\\). For the case of \\(\\delta_x \\neq 0\\), the CRLB can be approximated as follows:"
932
+ },
933
+ {
934
+ "type": "equation",
935
+ "bbox": [
936
+ 0.139,
937
+ 0.596,
938
+ 0.427,
939
+ 0.631
940
+ ],
941
+ "angle": 0,
942
+ "content": "\\[\n\\frac {\\partial \\mathrm {C R B} _ {m}}{\\partial x _ {m}} \\approx \\frac {\\gamma_ {1} K _ {E}}{(2 K _ {E} + 1)} \\left[ - \\frac {2 \\delta_ {x}}{d _ {\\mathrm {H}} ^ {4}} + \\frac {2}{\\Delta^ {3}} \\gamma_ {6} \\right].\n\\]"
943
+ },
944
+ {
945
+ "type": "text",
946
+ "bbox": [
947
+ 0.074,
948
+ 0.635,
949
+ 0.493,
950
+ 0.734
951
+ ],
952
+ "angle": 0,
953
+ "content": "Due to the assumption of \\(\\Delta \\gg d_{\\mathrm{H}}\\) , the term \\(\\frac{2\\delta_x}{d_{\\mathrm{H}}^4}\\) is dominant, and hence \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} >0\\) if \\(\\delta_{x} < 0\\) . In summary, \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} < 0\\) if the user's location is \\((x_{11}^{\\mathrm{Pin}},y_{11}^{\\mathrm{Pin}},0)\\) , and \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial x_m} >0\\) if the user's location is \\((x_{11}^{\\mathrm{Pin}} + \\delta_x,y_{11}^{\\mathrm{Pin}} + \\delta_y,d_{\\mathrm{H}})\\) . A similar conclusion can be established to \\(\\frac{\\partial\\mathrm{CRB}_m}{\\partial y_m}\\) , which means that there exists a local maximum for the CRLB around \\(\\psi_{n}^{\\mathrm{Pin}}\\)"
954
+ },
955
+ {
956
+ "type": "text",
957
+ "bbox": [
958
+ 0.074,
959
+ 0.735,
960
+ 0.493,
961
+ 0.855
962
+ ],
963
+ "angle": 0,
964
+ "content": "Remark 3: The local maximum property of the CRLB indicates an interesting conflict between the communication and sensing functionalities of pinching antennas. In particular, placing a pinching antenna directly above the user might increase the user's data rate but also degrade positioning accuracy. In other words, this local maximum property reveals the importance of antenna placement in pinching-antenna assisted ISAC networks."
965
+ },
966
+ {
967
+ "type": "title",
968
+ "bbox": [
969
+ 0.19,
970
+ 0.866,
971
+ 0.377,
972
+ 0.88
973
+ ],
974
+ "angle": 0,
975
+ "content": "IV. NUMERICAL STUDIES"
976
+ },
977
+ {
978
+ "type": "text",
979
+ "bbox": [
980
+ 0.075,
981
+ 0.885,
982
+ 0.492,
983
+ 0.945
984
+ ],
985
+ "angle": 0,
986
+ "content": "In this section, computer simulation results are presented to demonstrate the impact of pinching antennas on the positioning accuracy, where \\( K_{E} = 0.01 \\), \\( D_{\\mathrm{W}} = 10 \\mathrm{~m} \\) and \\( D_{\\mathrm{L}} = 40 \\mathrm{~m} \\), unless stated otherwise."
987
+ },
988
+ {
989
+ "type": "image",
990
+ "bbox": [
991
+ 0.574,
992
+ 0.066,
993
+ 0.857,
994
+ 0.235
995
+ ],
996
+ "angle": 0,
997
+ "content": null
998
+ },
999
+ {
1000
+ "type": "image_caption",
1001
+ "bbox": [
1002
+ 0.504,
1003
+ 0.239,
1004
+ 0.924,
1005
+ 0.312
1006
+ ],
1007
+ "angle": 0,
1008
+ "content": "Fig. 1. Averaged CRLBs, \\(\\mathrm{CRB}_m\\), achieved by the considered antenna systems, where \\(N_{\\mathrm{WG}} = 2\\) and \\(d = 3\\mathrm{m}\\). For the pinching-antenna system, on each waveguide, there are \\(\\frac{N}{N_{\\mathrm{WG}}}\\) antennas, which are equally spaced. Due to the singularity issue experienced by conventional antennas discussed in Section III-B1, users are assumed to be excluded from a square area with its side being \\(a\\) and its center at the origin."
1009
+ },
1010
+ {
1011
+ "type": "image",
1012
+ "bbox": [
1013
+ 0.571,
1014
+ 0.323,
1015
+ 0.841,
1016
+ 0.469
1017
+ ],
1018
+ "angle": 0,
1019
+ "content": null
1020
+ },
1021
+ {
1022
+ "type": "image_caption",
1023
+ "bbox": [
1024
+ 0.641,
1025
+ 0.479,
1026
+ 0.788,
1027
+ 0.492
1028
+ ],
1029
+ "angle": 0,
1030
+ "content": "(a) Conventional Antennas"
1031
+ },
1032
+ {
1033
+ "type": "image",
1034
+ "bbox": [
1035
+ 0.574,
1036
+ 0.508,
1037
+ 0.838,
1038
+ 0.653
1039
+ ],
1040
+ "angle": 0,
1041
+ "content": null
1042
+ },
1043
+ {
1044
+ "type": "image_caption",
1045
+ "bbox": [
1046
+ 0.649,
1047
+ 0.663,
1048
+ 0.774,
1049
+ 0.676
1050
+ ],
1051
+ "angle": 0,
1052
+ "content": "(b) Pinching Antennas"
1053
+ },
1054
+ {
1055
+ "type": "image_caption",
1056
+ "bbox": [
1057
+ 0.504,
1058
+ 0.675,
1059
+ 0.921,
1060
+ 0.713
1061
+ ],
1062
+ "angle": 0,
1063
+ "content": "Fig. 2. CRLBs achieved by the considered antenna systems. \\( N = 20 \\), \\( N_{\\mathrm{WG}} = 2 \\) and \\( d = 3 \\, \\mathrm{m} \\). On each waveguide, there are \\( \\frac{N}{N_{\\mathrm{WG}}} \\) antennas, which are equally spaced."
1064
+ },
1065
+ {
1066
+ "type": "text",
1067
+ "bbox": [
1068
+ 0.503,
1069
+ 0.717,
1070
+ 0.922,
1071
+ 0.867
1072
+ ],
1073
+ "angle": 0,
1074
+ "content": "In Fig. 1, the averaged CRLBs achieved by the conventional and pinching-antenna systems are shown as functions of the number of antennas, where \\( \\mathrm{U}_m \\) is assumed to be uniformly deployed in the service area. Because the conventional-antenna system suffers the singularity issue discussed in Section III-B1, it is assumed that \\( \\mathrm{U}_m \\) cannot be located in a square area with its side being \\( a \\) and its center at the origin. As can be seen from Fig. 1, the use of pinching antennas yields a significant performance gain over conventional antennas, regardless of the choices of \\( N \\) and \\( a \\)."
1075
+ },
1076
+ {
1077
+ "type": "text",
1078
+ "bbox": [
1079
+ 0.503,
1080
+ 0.87,
1081
+ 0.922,
1082
+ 0.946
1083
+ ],
1084
+ "angle": 0,
1085
+ "content": "Fig. 2 is provided to highlight the fact that a user's positioning accuracy depends on its location. On the one hand, Fig. 2(a) shows that for conventional antennas, a user can experience extremely poor positioning accuracy if it is located far away from the center of the service area, which"
1086
+ }
1087
+ ],
1088
+ [
1089
+ {
1090
+ "type": "page_number",
1091
+ "bbox": [
1092
+ 0.912,
1093
+ 0.031,
1094
+ 0.921,
1095
+ 0.041
1096
+ ],
1097
+ "angle": 0,
1098
+ "content": "5"
1099
+ },
1100
+ {
1101
+ "type": "image",
1102
+ "bbox": [
1103
+ 0.147,
1104
+ 0.053,
1105
+ 0.413,
1106
+ 0.197
1107
+ ],
1108
+ "angle": 0,
1109
+ "content": null
1110
+ },
1111
+ {
1112
+ "type": "image_caption",
1113
+ "bbox": [
1114
+ 0.145,
1115
+ 0.206,
1116
+ 0.42,
1117
+ 0.226
1118
+ ],
1119
+ "angle": 0,
1120
+ "content": "(a) Positioning with a focal point at \\(\\left(-\\frac{D_{\\mathrm{L}}}{4},0,0\\right)\\)"
1121
+ },
1122
+ {
1123
+ "type": "image",
1124
+ "bbox": [
1125
+ 0.144,
1126
+ 0.241,
1127
+ 0.408,
1128
+ 0.386
1129
+ ],
1130
+ "angle": 0,
1131
+ "content": null
1132
+ },
1133
+ {
1134
+ "type": "image_caption",
1135
+ "bbox": [
1136
+ 0.149,
1137
+ 0.394,
1138
+ 0.412,
1139
+ 0.414
1140
+ ],
1141
+ "angle": 0,
1142
+ "content": "(b) Positioning with a focal point at \\(\\left(\\frac{D_{\\mathrm{L}}}{4},0,0\\right)\\)"
1143
+ },
1144
+ {
1145
+ "type": "image_caption",
1146
+ "bbox": [
1147
+ 0.074,
1148
+ 0.414,
1149
+ 0.492,
1150
+ 0.465
1151
+ ],
1152
+ "angle": 0,
1153
+ "content": "Fig. 3. Using pinching antennas to achieve flexible user-centric positioning. \\( N = 20 \\), \\( N_{\\mathrm{WG}} = 2 \\) and \\( d = 3 \\mathrm{~m} \\). On each waveguide, there are \\( \\frac{N}{N_{\\mathrm{WG}}} \\) antennas, which are equally spaced in a segment with its length being \\( \\frac{D_{\\mathrm{L}}}{2} \\) and its center at the focal points shown in the figures."
1154
+ },
1155
+ {
1156
+ "type": "text",
1157
+ "bbox": [
1158
+ 0.074,
1159
+ 0.466,
1160
+ 0.491,
1161
+ 0.601
1162
+ ],
1163
+ "angle": 0,
1164
+ "content": "confirms the analytical results shown in (10). On the other hand, Fig. 2(b) shows that the use of pinching antennas ensures reasonably accurate positioning, regardless of whether the user is at the center or the edge of the service area. This also means that for the multi-user scenario, using pinching antennas can ensure fairness for the users' positioning accuracy. We note that in Fig. 2(b), local maximums are clearly visible in the proximity of the pinching antennas, which confirms the analysis shown in Section III-B3."
1165
+ },
1166
+ {
1167
+ "type": "text",
1168
+ "bbox": [
1169
+ 0.074,
1170
+ 0.602,
1171
+ 0.493,
1172
+ 0.797
1173
+ ],
1174
+ "angle": 0,
1175
+ "content": "Recall that one key feature of pinching antennas is their reconfiguration capabilities, where the number and the locations of the antennas can be changed in a flexible manner. Fig. 3 demonstrates how this reconfiguration feature can be used to achieve flexible user-centric positioning. In particular, Figs. 3(a) and 3(b) show that by activating the pinching antennas close to the intended user locations, different focal points can be realized, which means that users close to these focal points can enjoy high positioning accuracy. For the case where the pinching antennas are clustered close to a user, Fig. 4 is provided to show the impact of the antenna spacing on the CRLB, where the accuracy of the analytical results developed in (16) is also verified."
1176
+ },
1177
+ {
1178
+ "type": "title",
1179
+ "bbox": [
1180
+ 0.218,
1181
+ 0.805,
1182
+ 0.348,
1183
+ 0.819
1184
+ ],
1185
+ "angle": 0,
1186
+ "content": "V. CONCLUSIONS"
1187
+ },
1188
+ {
1189
+ "type": "text",
1190
+ "bbox": [
1191
+ 0.074,
1192
+ 0.824,
1193
+ 0.492,
1194
+ 0.947
1195
+ ],
1196
+ "angle": 0,
1197
+ "content": "This letter investigated how the key features of pinching antennas can be used to support ISAC from the CRLB perspective. In particular, the CRLB achieved by pinching antennas was first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrated that the use of pinching antennas can significantly reduce CRLB and, hence, enhance the sensing capability. In addition, this letter showed that the low-cost and"
1198
+ },
1199
+ {
1200
+ "type": "image",
1201
+ "bbox": [
1202
+ 0.574,
1203
+ 0.041,
1204
+ 0.858,
1205
+ 0.212
1206
+ ],
1207
+ "angle": 0,
1208
+ "content": null
1209
+ },
1210
+ {
1211
+ "type": "image_caption",
1212
+ "bbox": [
1213
+ 0.504,
1214
+ 0.216,
1215
+ 0.922,
1216
+ 0.263
1217
+ ],
1218
+ "angle": 0,
1219
+ "content": "Fig. 4. Impact of the antenna spacing on the CRLB. \\( N = 4 \\) pinching antennas are activated in a square-shape area with the antenna spacing being \\( \\Delta \\) and \\( \\mathrm{U}_m \\) located at the center of the area, where \\( N_{\\mathrm{WG}} = 2 \\). The analytical results are based on (16)."
1220
+ },
1221
+ {
1222
+ "type": "text",
1223
+ "bbox": [
1224
+ 0.504,
1225
+ 0.269,
1226
+ 0.922,
1227
+ 0.3
1228
+ ],
1229
+ "angle": 0,
1230
+ "content": "reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning."
1231
+ },
1232
+ {
1233
+ "type": "title",
1234
+ "bbox": [
1235
+ 0.665,
1236
+ 0.307,
1237
+ 0.762,
1238
+ 0.319
1239
+ ],
1240
+ "angle": 0,
1241
+ "content": "REFERENCES"
1242
+ },
1243
+ {
1244
+ "type": "ref_text",
1245
+ "bbox": [
1246
+ 0.515,
1247
+ 0.327,
1248
+ 0.922,
1249
+ 0.361
1250
+ ],
1251
+ "angle": 0,
1252
+ "content": "[1] A. Fukuda, H. Yamamoto, H. Okazaki, Y. Suzuki, and K. Kawai, \"Pinching antenna - using a dielectric waveguide as an antenna,\" NTT DOCOMO Technical J., vol. 23, no. 3, pp. 5-12, Jan. 2022."
1253
+ },
1254
+ {
1255
+ "type": "ref_text",
1256
+ "bbox": [
1257
+ 0.515,
1258
+ 0.361,
1259
+ 0.921,
1260
+ 0.394
1261
+ ],
1262
+ "angle": 0,
1263
+ "content": "[2] Z. Ding, R. Schober, and H. V. Poor, \"Flexible-antenna systems: A pinching-antenna perspective,\" IEEE Trans. Commun., (to appear in 2025) Available on-line at arXiv:2412.02376."
1264
+ },
1265
+ {
1266
+ "type": "ref_text",
1267
+ "bbox": [
1268
+ 0.514,
1269
+ 0.395,
1270
+ 0.921,
1271
+ 0.428
1272
+ ],
1273
+ "angle": 0,
1274
+ "content": "[3] Z. Ding and H. V. Poor, “Los blockage in pinching-antenna systems: Curse or blessing?” IEEE Wireless Commun. Lett., (submitted) Available on-line at arXiv:2503.08554."
1275
+ },
1276
+ {
1277
+ "type": "ref_text",
1278
+ "bbox": [
1279
+ 0.514,
1280
+ 0.429,
1281
+ 0.921,
1282
+ 0.463
1283
+ ],
1284
+ "angle": 0,
1285
+ "content": "[4] K. Wang, Z. Ding, and R. Schober, \"Antenna activation for NOMA assisted pinching-antenna systems,\" IEEE Commun. Lett., (to appear in 2025) Available on-line at arXiv:2412.13969."
1286
+ },
1287
+ {
1288
+ "type": "ref_text",
1289
+ "bbox": [
1290
+ 0.514,
1291
+ 0.463,
1292
+ 0.921,
1293
+ 0.496
1294
+ ],
1295
+ "angle": 0,
1296
+ "content": "[5] C. Ouyang, Z. Wang, Y. Liu, and Z. Ding, \"Array gain for pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2501.05657."
1297
+ },
1298
+ {
1299
+ "type": "ref_text",
1300
+ "bbox": [
1301
+ 0.514,
1302
+ 0.496,
1303
+ 0.921,
1304
+ 0.531
1305
+ ],
1306
+ "angle": 0,
1307
+ "content": "[6] Z. Wang, C. Ouyang, X. Mu, Y. Liu, and Z. Ding, \"Modeling and beamforming optimization for pinching-antenna systems,\" IEEE Trans. Wireless Commun., (submitted) Available on-line at arXiv:2502.05917."
1308
+ },
1309
+ {
1310
+ "type": "ref_text",
1311
+ "bbox": [
1312
+ 0.514,
1313
+ 0.531,
1314
+ 0.921,
1315
+ 0.564
1316
+ ],
1317
+ "angle": 0,
1318
+ "content": "[7] Y. Xu, Z. Ding, and G. Karagiannidis, \"Rate maximization for downlink pinching-antenna systems,\" IEEE Commun. Lett., (to appear in 2025) Available on-line at arXiv:2502.12629."
1319
+ },
1320
+ {
1321
+ "type": "ref_text",
1322
+ "bbox": [
1323
+ 0.514,
1324
+ 0.565,
1325
+ 0.921,
1326
+ 0.599
1327
+ ],
1328
+ "angle": 0,
1329
+ "content": "[8] X. Mu, G. Zhu, and Y. Liu, \"Pinching-antenna system (PASS)-enabled multicast communications,\" IEEE Trans. Commun., (submitted) Available on-line at arXiv:2502.16624."
1330
+ },
1331
+ {
1332
+ "type": "ref_text",
1333
+ "bbox": [
1334
+ 0.514,
1335
+ 0.599,
1336
+ 0.921,
1337
+ 0.632
1338
+ ],
1339
+ "angle": 0,
1340
+ "content": "[9] J. Xiao, J. Wang, and Y. Liu, \"Channel estimation for pinching-antenna systems (PASS),\" IEEE Trans. Commun., (submitted) Available on-line at arXiv:2503.13268."
1341
+ },
1342
+ {
1343
+ "type": "ref_text",
1344
+ "bbox": [
1345
+ 0.508,
1346
+ 0.633,
1347
+ 0.921,
1348
+ 0.666
1349
+ ],
1350
+ "angle": 0,
1351
+ "content": "[10] ——, “Beam training for pinching-antenna systems (PASS),” IEEE Trans. Wireless Commun., (submitted) Available on-line at arXiv:2502.05921."
1352
+ },
1353
+ {
1354
+ "type": "ref_text",
1355
+ "bbox": [
1356
+ 0.508,
1357
+ 0.666,
1358
+ 0.921,
1359
+ 0.699
1360
+ ],
1361
+ "angle": 0,
1362
+ "content": "[11] X. Xie, Y. Lu, and Z. Ding, \"Graph neural network enabled pinching antennas,\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2502.05447."
1363
+ },
1364
+ {
1365
+ "type": "ref_text",
1366
+ "bbox": [
1367
+ 0.508,
1368
+ 0.7,
1369
+ 0.921,
1370
+ 0.734
1371
+ ],
1372
+ "angle": 0,
1373
+ "content": "[12] J. Guo, Y. Liu, and A. Nallanathan, \"GPASS: Deep learning for beamforming in pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2502.01438."
1374
+ },
1375
+ {
1376
+ "type": "ref_text",
1377
+ "bbox": [
1378
+ 0.508,
1379
+ 0.735,
1380
+ 0.921,
1381
+ 0.779
1382
+ ],
1383
+ "angle": 0,
1384
+ "content": "[13] S. A. Tegos, P. D. Diamantoulakis, Z. Ding, and G. K. Karagiannidis, \"Minimum data rate maximization for uplink pinching-antenna systems,\" IEEE Wireless Commun. Lett., (to appear in 2025) Available on-line at arXiv:2412.13892."
1385
+ },
1386
+ {
1387
+ "type": "ref_text",
1388
+ "bbox": [
1389
+ 0.508,
1390
+ 0.78,
1391
+ 0.921,
1392
+ 0.813
1393
+ ],
1394
+ "angle": 0,
1395
+ "content": "[14] M. Sun, C. Ouyang, S. Wu, and Y. Liu, \"Physical layer security for pinching-antenna systems (PASS),\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2503.09075."
1396
+ },
1397
+ {
1398
+ "type": "ref_text",
1399
+ "bbox": [
1400
+ 0.508,
1401
+ 0.814,
1402
+ 0.921,
1403
+ 0.847
1404
+ ],
1405
+ "angle": 0,
1406
+ "content": "[15] Y. Qin, Y. Fu, and H. Zhang, \"Joint antenna position and transmit power optimization for pinching antenna-assisted ISAC systems,\" IEEE Commun. Lett., (submitted) Available on-line at arXiv:2503.12872."
1407
+ },
1408
+ {
1409
+ "type": "ref_text",
1410
+ "bbox": [
1411
+ 0.508,
1412
+ 0.848,
1413
+ 0.921,
1414
+ 0.893
1415
+ ],
1416
+ "angle": 0,
1417
+ "content": "[16] F. Liu, Y. Cui, C. Masouros, J. Xu, T. X. Han, Y. C. Eldar, and S. Buzzi, \"Integrated sensing and communications: Toward dual-functional wireless networks for 6G and beyond,\" IEEE J. Sel. Areas Commun., vol. 40, no. 6, pp. 1728-1767, 2022."
1418
+ },
1419
+ {
1420
+ "type": "ref_text",
1421
+ "bbox": [
1422
+ 0.508,
1423
+ 0.894,
1424
+ 0.921,
1425
+ 0.928
1426
+ ],
1427
+ "angle": 0,
1428
+ "content": "[17] T. Jia and R. M. Buehrer, “A new cramer-rao lower bound for TOA-based localization,” in Proc. Military Commun. Conf. (MILCOM 2008), Nov. 2008, pp. 1-5."
1429
+ },
1430
+ {
1431
+ "type": "list",
1432
+ "bbox": [
1433
+ 0.508,
1434
+ 0.327,
1435
+ 0.922,
1436
+ 0.928
1437
+ ],
1438
+ "angle": 0,
1439
+ "content": null
1440
+ }
1441
+ ]
1442
+ ]
data/2025/2504_05xxx/2504.05792/2e1d882c-c0bf-4c6f-98d8-3ae7d8fb9f26_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0c4348fab16e7367bc1a5a65714dca2086121d9f02796b0cad0528e883f7342
3
+ size 391018
data/2025/2504_05xxx/2504.05792/full.md ADDED
@@ -0,0 +1,275 @@
1
+ # Pinching-Antenna Assisted ISAC: A CRLB Perspective
2
+
3
+ Zhiguo Ding, Fellow, IEEE
4
+
5
+ Abstract—Recently, pinching antennas have attracted significant research interest due to their capability to reconfigure wireless channels as well as their array configuration flexibility. This letter focuses on how these features can be used to support integrated sensing and communications (ISAC) from the Cramér-Rao lower bound (CRLB) perspective. In particular, the CRLB achieved by pinching antennas is first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrate that using pinching antennas can significantly reduce CRLB and, hence, enhance positioning accuracy. In addition, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning.
6
+
7
+ Index Terms—Pinching antennas, integrated sensing and communications (ISAC), Cramér-Rao lower bound (CRLB), estimation theory.
8
+
9
+ # I. INTRODUCTION
10
+
11
+ Recently, pinching antennas have received significant attention from both academia and industry as a novel evolution of smart antennas, and offer three distinguishing features [1], [2]. One is their capability to create strong line-of-sight (LoS) links between the transceivers, which means that large-scale path losses and LoS blockage can be effectively mitigated by activating antennas close to users [3]. The second feature is the reconfigurability of pinching-antenna systems, where the topology of a pinching-antenna array, e.g., the locations and the number of pinching antennas, can be flexibly adjusted. The third feature is their practicality, where DOCOMO's prototype shows that pinching antennas can be straightforwardly implemented in a low-cost manner [1].
12
+
13
+ In the literature, there already exists a large amount of work to demonstrate that the use of pinching antennas can significantly enhance the communication functionality of wireless networks. For example, the fundamental issues of pinching antennas, such as antenna activation, the architecture of a pinching-antenna array, and the array gains, have been investigated in [4]–[6]. Antenna placement is key to realizing the full potential of pinching-antenna systems, where various designs and their impact on the system throughput have been investigated in [7], [8]. Channel estimation and beam training are crucial issues for pinching-antenna systems, and sophisticated designs using the flexibility features of pinching antennas have been developed in [9], [10]. For many resource allocation problems encountered in pinching-antenna systems, the use of conventional convex optimization leads to high computational complexity, which motivates the application of advanced learning methods [11], [12]. The applications of pinching antennas to improve the uplink throughput and the security of communication networks have also been recently investigated in [13], [14].
14
+
15
+ However, we note that the impact of pinching antennas on the sensing functionality of wireless networks has not yet been fully characterized in the literature, although the recent work in [15] demonstrated the importance of pinching antennas in integrated sensing and communication (ISAC) systems [16], which motivates this letter. In particular, in this letter, the Cramér-Rao lower bound (CRLB) is used as the performance metric to characterize the capability of pinching antennas for enhancing the positioning accuracy of ISAC networks. The CRLB achieved by pinching antennas is first derived in the letter, and then compared to that of conventional antennas. The presented analytical results reveal that the use of pinching antennas can ensure that users at different locations experience uniform positioning accuracy, whereas the use of conventional antennas can result in a significant disparity in accuracy among the users. In addition, the important properties of the CRLB achieved by pinching antennas, such as the effects of antenna placement and the local maximums of the CRLB, are also investigated. Furthermore, this letter also reveals that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning.
16
+
17
+ Z. Ding is with the University of Manchester, Manchester, M1 9BB, UK, and Khalifa University, Abu Dhabi, UAE.
20
+
21
+ # II. SYSTEM MODEL
22
+
23
+ Consider a pinching-antenna system that is deployed to provide ISAC services to $M$ single-antenna users, denoted by $\mathrm{U}_m$ . Given the fact that there is already a rich literature on using pinching antennas to enhance communications, and also due to space limitations, the impact of pinching antennas on the sensing functionality is focused on in this letter. Without loss of generality, assume that $N$ pinching antennas are activated on $N_{\mathrm{WG}}$ waveguides. The location of the $n$ -th pinching antenna is denoted by $\psi_n^{\mathrm{Pin}} = (x_n^{\mathrm{Pin}}, y_n^{\mathrm{Pin}}, d_{\mathrm{H}})$ , where $d_{\mathrm{H}}$ denotes the height of the waveguides.
24
+
25
+ The service area is denoted by $\mathcal{A}$ and is assumed to be a rectangle with its two sides denoted by $D_{\mathrm{W}}$ and $D_{\mathrm{L}}$ , respectively, and its center located at $(0,0,0)$ . The users are assumed to be uniformly distributed in $\mathcal{A}$ , and $\mathrm{U}_m$ 's location is denoted by $\psi_m = (x_m,y_m,0)$ .
26
+
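+ To make the geometry concrete, the following minimal Python sketch builds one possible antenna layout under the model above; the values of $N$, $N_{\mathrm{WG}}$, $D_{\mathrm{L}}$, $D_{\mathrm{W}}$ and $d_{\mathrm{H}}$, and the even spreading of the waveguides across the width, are illustrative assumptions rather than choices made in the letter.
+
+ ```python
+ import numpy as np
+
+ # Illustrative parameters (assumed, not specified at this point of the letter).
+ N, N_WG = 20, 2                   # activated pinching antennas and waveguides
+ D_L, D_W, d_H = 40.0, 10.0, 3.0   # service-area sides and waveguide height (m)
+
+ # One plausible layout: N / N_WG equally spaced antennas per waveguide,
+ # waveguides parallel to the x-axis and spread evenly across the width D_W.
+ N_per = N // N_WG
+ x_pin = np.tile(np.linspace(-D_L / 2, D_L / 2, N_per), N_WG)
+ y_pin = np.repeat(np.linspace(-D_W / 2, D_W / 2, 2 * N_WG + 1)[1::2], N_per)
+ psi_pin = np.stack([x_pin, y_pin, np.full(N, d_H)], axis=1)  # (N, 3)
+
+ # A user dropped uniformly in the service area, on the ground plane z = 0.
+ rng = np.random.default_rng(0)
+ psi_m = np.array([rng.uniform(-D_L / 2, D_L / 2),
+                   rng.uniform(-D_W / 2, D_W / 2), 0.0])
+ ```
+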
27
+ Denote the distance from the $n$ -th pinching antenna to the $m$ -th user by $d_{mn}$. Distance (range) estimates for the $m$ -th user can be modeled as follows [17]:
28
+
29
+ $$
30
+ \hat {d} _ {m n} = d _ {m n} + w _ {m n}, \tag {1}
31
+ $$
32
+
33
+ where $d_{mn} = \sqrt{(x_m - x_n^{\mathrm{Pin}})^2 + (y_m - y_n^{\mathrm{Pin}})^2 + d_{\mathrm{H}}^2}$ , and $w_{mn}$ is a zero-mean Gaussian distributed noise term whose variance is distance-dependent, i.e.,
34
+
35
+ $$
36
+ \sigma_ {m n} ^ {2} = K _ {E} \left(\left(x _ {m} - x _ {n} ^ {\mathrm {P i n}}\right) ^ {2} + \left(y _ {m} - y _ {n} ^ {\mathrm {P i n}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}\right), \tag {2}
37
+ $$
38
+
39
+ where $K_{E}$ denotes a system parameter decided by the range estimation environment.
40
+
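+ As a quick sanity check on the measurement model in (1) and (2), the sketch below draws range estimates whose noise standard deviation grows linearly with the true distance; it reuses the variables from the previous snippet, and $K_E = 0.01$ is an assumed value (the simulation section of the letter uses the same number).
+
+ ```python
+ K_E = 0.01  # assumed range-estimation environment parameter
+
+ # True distances d_mn from the user to each activated antenna, as in (1).
+ d_mn = np.linalg.norm(psi_pin - psi_m, axis=1)
+
+ # Distance-dependent noise from (2): sigma_mn^2 = K_E * d_mn^2.
+ sigma_mn = np.sqrt(K_E) * d_mn
+
+ # Noisy range estimates d_hat_mn = d_mn + w_mn.
+ d_hat = d_mn + rng.normal(0.0, sigma_mn)
+ ```
+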
41
+ # III. IMPACT OF PINCHING ANTENNAS ON POSITIONING
42
+
43
+ # A. CRLB Achieved by Pinching-Antenna Systems
44
+
45
+ Without loss of generality, the impact of pinching antennas on $\mathrm{U}_m$ 's localization is focused on. The joint probability density function (pdf) of $\hat{d}_{mn}$ conditioned on $d_{mn}$, $1\leq n\leq N$, is given by
48
+
49
+ $$
50
+ f (\hat {d} _ {m 1}, \dots , \hat {d} _ {m N}) = \prod_ {n = 1} ^ {N} \frac {1}{\sqrt {2 \pi \sigma_ {m n} ^ {2}}} e ^ {- \frac {(\hat {d} _ {m n} - d _ {m n}) ^ {2}}{2 \sigma_ {m n} ^ {2}}}, \tag {3}
51
+ $$
52
+
53
+ whose log-likelihood function is given by
54
+
55
+ $$
56
+ \begin{array}{l} L \triangleq \ln f (\hat {d} _ {m 1}, \dots , \hat {d} _ {m N}) = - \frac {N}{2} \ln (2 \pi) \tag {4} \\ - \sum_ {n = 1} ^ {N} \ln \sigma_ {m n} - \sum_ {n = 1} ^ {N} \frac {(\hat {d} _ {m n} - d _ {m n}) ^ {2}}{2 \sigma_ {m n} ^ {2}}. \\ \end{array}
57
+ $$
58
+
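+ For reference, (4) transcribes directly into code; the sketch below evaluates $L$ at a candidate user location, reusing the assumed variables introduced earlier.
+
+ ```python
+ def log_likelihood(x, y, d_hat, psi_pin, K_E, d_H):
+     """Log-likelihood L of eq. (4) for a candidate user location (x, y, 0)."""
+     d = np.sqrt((x - psi_pin[:, 0])**2 + (y - psi_pin[:, 1])**2 + d_H**2)
+     sigma2 = K_E * d**2                        # eq. (2)
+     return (-0.5 * len(d) * np.log(2.0 * np.pi)
+             - 0.5 * np.sum(np.log(sigma2))     # equals -sum_n ln(sigma_mn)
+             - np.sum((d_hat - d)**2 / (2.0 * sigma2)))
+ ```
+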
59
+ Recall that the CRLB for $x_{m}$ and $y_{m}$ is given by
60
+
61
+ $$
62
+ \mathcal {E} \left\{\left(\hat {x} _ {m} - x _ {m}\right) ^ {2} + \left(\hat {y} _ {m} - y _ {m}\right) ^ {2} \right\} \geq \frac {1}{J _ {x} ^ {m}} + \frac {1}{J _ {y} ^ {m}} \triangleq \mathrm {C R B} _ {m}, \tag {5}
63
+ $$
64
+
65
+ where $\hat{x}_m$ and $\hat{y}_m$ denote the estimates of $x_m$ and $y_m$ , respectively, $J_x^m = \mathcal{E}\left\{-\frac{\partial^2L}{\partial x_m^2}\right\}$ and $J_y^m = \mathcal{E}\left\{-\frac{\partial^2L}{\partial y_m^2}\right\}$ .
66
+
67
+ $\frac{\partial L}{\partial x_m}$ can be obtained as follows:
68
+
69
+ $$
70
+ \begin{array}{l} \frac {\partial L}{\partial x _ {m}} = - \sum_ {n = 1} ^ {N} \frac {1}{\sigma_ {m n}} \frac {\partial \sigma_ {m n}}{\partial x _ {m}} - \sum_ {n = 1} ^ {N} \frac {\left(d _ {m n} - \hat {d} _ {m n}\right)}{\sigma_ {m n} ^ {2}} \frac {\partial d _ {m n}}{\partial x _ {m}} \tag {6} \\ + \sum_ {n = 1} ^ {N} \frac {(\hat {d} _ {m n} - d _ {m n}) ^ {2}}{\sigma_ {m n} ^ {3}} \frac {\partial \sigma_ {m n}}{\partial x _ {m}}. \\ \end{array}
71
+ $$
72
+
73
+ The expression of $\frac{\partial^2L}{\partial x_m^2}$ is quite involved; however, by using the fact that $\mathcal{E}\{\hat{d}_{mn} - d_{mn}\} = 0$ and following the steps similar to those in [17], the expectation of $\frac{\partial^2L}{\partial x_m^2}$, i.e., $J_x^m$, can be obtained as follows:
74
+
75
+ $$
76
+ J _ {x} ^ {m} = \sum_ {n = 1} ^ {N} \frac {\left(2 K _ {E} + 1\right)}{\sigma_ {m n} ^ {2}} \frac {\left(x _ {m} - x _ {n} ^ {\operatorname {P i n}}\right) ^ {2}}{\left(x _ {m} - x _ {n} ^ {\operatorname {P i n}}\right) ^ {2} + \left(y _ {m} - y _ {n} ^ {\operatorname {P i n}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}}. \tag {7}
77
+ $$
78
+
79
+ $J_{y}^{m}$ can be obtained in a similar form, which means that the CRLB for estimating $\mathrm{U}_m$ 's location can be expressed as follows:
80
+
81
+ $$
82
+ \begin{array}{l} \mathrm {C R B} _ {m} = \frac {K _ {E}}{(2 K _ {E} + 1)} \left(\frac {1}{\sum_ {n = 1} ^ {N} \frac {\left(x _ {m} - x _ {n} ^ {\mathrm {P i n}}\right) ^ {2}}{\left(\left(x _ {m} - x _ {n} ^ {\mathrm {P i n}}\right) ^ {2} + \left(y _ {m} - y _ {n} ^ {\mathrm {P i n}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}} \right. \\ \left. + \frac {1}{\sum_ {n = 1} ^ {N} \frac {\left(y _ {m} - y _ {n} ^ {\operatorname* {P i n}}\right) ^ {2}}{\left(\left(x _ {m} - x _ {n} ^ {\operatorname* {P i n}}\right) ^ {2} + \left(y _ {m} - y _ {n} ^ {\operatorname* {P i n}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}}\right). \tag {8} \\ \end{array}
83
+ $$
84
+
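+ The bound in (8) is straightforward to evaluate numerically; here is a minimal sketch (the function name and array layout are ours, not the letter's):
+
+ ```python
+ def crb(x_m, y_m, ant_xy, K_E, d_H):
+     """CRB_m of eq. (8) for a user at (x_m, y_m, 0); ant_xy holds one (x, y) per antenna."""
+     dx = x_m - ant_xy[:, 0]
+     dy = y_m - ant_xy[:, 1]
+     r2 = dx**2 + dy**2 + d_H**2      # squared distances d_mn^2
+     sx = np.sum(dx**2 / r2**2)       # sum behind J_x^m in eq. (7)
+     sy = np.sum(dy**2 / r2**2)
+     return K_E / (2.0 * K_E + 1.0) * (1.0 / sx + 1.0 / sy)
+
+ # Example: CRLB for a user at the center of the service area.
+ print(crb(0.0, 0.0, psi_pin[:, :2], K_E, d_H))
+ ```
+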
85
+ # B. Performance Analysis Based on CRLB
86
+
87
+ 1) Performance Gain over Conventional Antennas: For the conventional-antenna benchmark, consider the use of a circular antenna array with its center located at $(0,0,0)$ and its radius being $\frac{\lambda}{4\sin\left(\frac{\pi}{N}\right)}$ , which ensures that the minimal pairwise distance of the antennas is $\frac{\lambda}{2}$ , where $\lambda$ denotes the wavelength. By using the fact that the users are uniformly distributed within the service area, the performance gain of pinching antennas over conventional antennas can be evaluated as follows:
88
+
89
+ $$
90
+ \Delta_ {\mathrm {C R B}} = \int_ {- \frac {D _ {\mathrm {L}}}{2}} ^ {\frac {D _ {\mathrm {L}}}{2}} \int_ {- \frac {D _ {\mathrm {W}}}{2}} ^ {\frac {D _ {\mathrm {W}}}{2}} \left(\mathrm {C R B} _ {m} - \mathrm {C R B} _ {m} ^ {\text {C o n v}}\right) \frac {d y _ {m}}{D _ {\mathrm {W}}} \frac {d x _ {m}}{D _ {\mathrm {L}}}, \tag {9}
91
+ $$
92
+
93
+ where $\mathrm{CRB}_m^{\mathrm{Conv}}$ can be obtained similarly to $\mathrm{CRB}_m$ by replacing the locations of the pinching antennas with those of the conventional antennas. The performance gain in (9) can be straightforwardly evaluated via computer simulations, but a closed-form expression of $\Delta_{\mathrm{CRB}}$ is difficult to obtain due to the fractional expression of the CRLB. We note that the performance gain of pinching antennas over conventional antennas can also be illustrated by simply focusing on the user located at $\left(\frac{D_{\mathrm{L}}}{2},0,0\right)$. The use of conventional antennas can achieve the following CRLB:
94
+
95
+ $$
96
+ \begin{array}{l} \mathrm {C R B} _ {m} ^ {\mathrm {C o n v}} = \left(\frac {1}{\sum_ {n = 1} ^ {N} \frac {\left(\frac {D _ {\mathrm {L}}}{2} - x _ {n} ^ {\mathrm {C o n v}}\right) ^ {2}}{\left(\left(\frac {D _ {\mathrm {L}}}{2} - x _ {n} ^ {\mathrm {C o n v}}\right) ^ {2} + (y _ {n} ^ {\mathrm {C o n v}}) ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}} \right. \\ \left. + \frac {1}{\sum_ {n = 1} ^ {N} \frac {\left(y _ {n} ^ {\text {C o n v}}\right) ^ {2}}{\left(\left(\frac {D _ {\mathrm {L}}}{2} - x _ {n} ^ {\text {C o n v}}\right) ^ {2} + \left(y _ {n} ^ {\text {C o n v}}\right) ^ {2} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}}\right) \frac {K _ {E}}{\left(2 K _ {E} + 1\right)} \\ \stackrel {(a)} {\approx} \frac {K _ {E}}{(2 K _ {E} + 1)} \left(\frac {4 \left(\frac {D _ {\mathrm {L}} ^ {2}}{4} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}{N D _ {\mathrm {L}} ^ {2}} + \frac {\left(\frac {D _ {\mathrm {L}} ^ {2}}{4} + d _ {\mathrm {H}} ^ {2}\right) ^ {2}}{\sum_ {n = 1} ^ {N} (y _ {n} ^ {\mathrm {C o n v}}) ^ {2}}\right) \\ \xrightarrow {(b)} \infty , \tag {10} \\ \end{array}
97
+ $$
98
+
99
+ where step (a) is due to the fact that the conventional antennas are clustered close to the center of the service area, and step (b) is due to the fact that $|y_{n}^{\mathrm{Conv}}| \to 0$ for conventional antennas, particularly for the case with high carrier frequencies (i.e., small wavelengths).
100
+
101
+ On the other hand, pinching antennas do not suffer from the singularity issue experienced by conventional antennas. For example, for the user located at $\left(\frac{D_{\mathrm{L}}}{2},0,0\right)$, the corresponding CRLB can be expressed as follows:
+
+ $$
+ \mathrm{CRB}_m = \frac{K_E}{(2K_E+1)} \left( \frac{1}{\sum_{n=1}^{N} \frac{\left(\frac{D_{\mathrm{L}}}{2} - x_n^{\mathrm{Pin}}\right)^2}{\left(\left(\frac{D_{\mathrm{L}}}{2} - x_n^{\mathrm{Pin}}\right)^2 + \left(y_n^{\mathrm{Pin}}\right)^2 + d_{\mathrm{H}}^2\right)^2}} + \frac{1}{\sum_{n=1}^{N} \frac{\left(y_n^{\mathrm{Pin}}\right)^2}{\left(\left(\frac{D_{\mathrm{L}}}{2} - x_n^{\mathrm{Pin}}\right)^2 + \left(y_n^{\mathrm{Pin}}\right)^2 + d_{\mathrm{H}}^2\right)^2}} \right). \tag{11}
+ $$
+
+ For illustrative purposes, a simple upper bound on the CRLB achieved by pinching antennas can be obtained as follows:
+
+ $$
+ \mathrm{CRB}_m \leq \frac{K_E}{(2K_E+1)} \left( \frac{\left(\left(\frac{D_{\mathrm{L}}}{2} - x_n^{\mathrm{Pin}}\right)^2 + \left(y_n^{\mathrm{Pin}}\right)^2 + d_{\mathrm{H}}^2\right)^2}{\left(\frac{D_{\mathrm{L}}}{2} - x_n^{\mathrm{Pin}}\right)^2} + \frac{\left(\left(\frac{D_{\mathrm{L}}}{2} - x_n^{\mathrm{Pin}}\right)^2 + \left(y_n^{\mathrm{Pin}}\right)^2 + d_{\mathrm{H}}^2\right)^2}{\left(y_n^{\mathrm{Pin}}\right)^2} \right), \tag{12}
+ $$
+
+ where $n$ is an arbitrary integer between 1 and $N$. Because of the diverse locations of the $N$ pinching antennas, it is always possible to find $n \in \{1, \dots, N\}$ which yields a finite value for the upper bound shown in (12), i.e., the CRLB achieved by pinching antennas is always bounded.
+
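+ The contrast between (10) and (11), as well as the boundedness claim based on (12), can be checked numerically; the following sketch uses the same illustrative layout and an assumed wavelength for the benchmark array.
+
+ ```python
+ import numpy as np
+
+ D_L, D_W, N, d_h, k_e = 40.0, 10.0, 20, 3.0, 0.01
+ lam = 0.01  # assumed wavelength for the conventional benchmark array
+ c = k_e / (2 * k_e + 1)
+
+ ang = 2 * np.pi * np.arange(N) / N
+ conv = lam / (4 * np.sin(np.pi / N)) * np.stack([np.cos(ang), np.sin(ang)], axis=1)
+ pin = np.stack(np.meshgrid(np.linspace(-D_L / 2, D_L / 2, N // 2),
+                            np.array([-D_W / 2, D_W / 2])), axis=-1).reshape(-1, 2)
+
+ def crb(u, a):
+     dx, dy = u[0] - a[:, 0], u[1] - a[:, 1]
+     d4 = (dx**2 + dy**2 + d_h**2) ** 2
+     return c * (1 / np.sum(dx**2 / d4) + 1 / np.sum(dy**2 / d4))
+
+ edge = np.array([D_L / 2, 0.0])
+ print("conventional:", crb(edge, conv))  # very large, reflecting the singularity in (10)
+ print("pinching    :", crb(edge, pin))   # finite, per (11)
+
+ # Upper bound (12): any single antenna n with nonzero offsets already gives a finite bound
+ dx, dy = edge[0] - pin[:, 0], edge[1] - pin[:, 1]
+ d4 = (dx**2 + dy**2 + d_h**2) ** 2
+ ok = (dx != 0) & (dy != 0)
+ print("bound (12)  :", np.min(c * (d4[ok] / dx[ok] ** 2 + d4[ok] / dy[ok] ** 2)))
+ ```
+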
+ Remark 1: Unlike conventional antennas, which can cause noticeable accuracy variations among users, the case study carried out above shows that pinching antennas have the ability to offer uniform positioning accuracy across the users.
+
+ 2) Flexible User-Centric Positioning: Due to their low-cost and reconfigurability features, the locations of pinching antennas can be tailored to a serving user for realizing flexible user-centric positioning. To facilitate the performance analysis, the association between the pinching antennas and the waveguides is required. Without loss of generality, assume that there are $\tilde{N} = \frac{N}{N_{\mathrm{WG}}}$ pinching antennas on each waveguide. Denote the location of the $n$-th antenna on the $i$-th waveguide by $\psi_{in}^{\mathrm{Pin}} = (x_{in}^{\mathrm{Pin}}, y_{in}^{\mathrm{Pin}}, d_{\mathrm{H}})$. Furthermore, assume that the antennas are equally spaced, and define $\Delta_x = |x_{in}^{\mathrm{Pin}} - x_{im}^{\mathrm{Pin}}|$ and $\Delta_y = |y_{in}^{\mathrm{Pin}} - y_{jn}^{\mathrm{Pin}}|$, $m \neq n$ and $i \neq j$.
+
+ For illustrative purposes, assume that all $N$ pinching antennas are activated in a square area with $\mathrm{U}_m$ at its center, where $\tilde{N} = N_{\mathrm{WG}}$ and $\Delta_x = \Delta_y = \Delta$. This assumption is made to facilitate the performance analysis, and more practical setups will be considered in the simulation section. Define $\bar{N} = \frac{\tilde{N}}{2}$, and without loss of generality, assume that $\tilde{N}$ is an even number.
+
+ With these assumptions, the CRLB in (8) can be simplified as follows:
+
+ $$
+ \mathrm{CRB}_m = \frac{\frac{K_E \Delta^2}{4(2K_E+1)}}{\sum_{i=1}^{\bar{N}} \sum_{n=1}^{\bar{N}} \frac{\left(n-\frac{1}{2}\right)^2}{\beta_{ni}^2}} + \frac{\frac{K_E \Delta^2}{4(2K_E+1)}}{\sum_{i=1}^{\bar{N}} \sum_{n=1}^{\bar{N}} \frac{\left(i-\frac{1}{2}\right)^2}{\beta_{ni}^2}},
+ $$
+
+ where $\beta_{ni} = \left(n - \frac{1}{2}\right)^2 + \left(i - \frac{1}{2}\right)^2 + \frac{d_{\mathrm{H}}^2}{\Delta^2}$. The above CRLB can be used to design the antenna placement, i.e., to find the optimal choice of $\Delta$ for minimizing the CRLB. Computer simulations can be used to verify that $\frac{\partial^2 \mathrm{CRB}_m}{\partial \Delta^2} > 0$, i.e., $\mathrm{CRB}_m$ is a convex function of $\Delta$, and hence convex optimization solvers can be used to find the optimal $\Delta$ efficiently, as illustrated by the sketch below.
+
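+ For instance, the double-sum expression above can be evaluated on a grid of $\Delta$ to check convexity numerically; the sketch below uses $\bar{N} = 2$ (i.e., $\tilde{N} = N_{\mathrm{WG}} = 4$) and $d_{\mathrm{H}} = 3$ m as illustrative values.
+
+ ```python
+ import numpy as np
+
+ d_h, k_e = 3.0, 0.01
+ n_bar = 2  # N_bar = N_tilde / 2
+
+ def crb_square(delta):
+     # Double-sum CRLB for the square placement; by symmetry, both sums are identical
+     n, i = np.meshgrid(np.arange(1, n_bar + 1), np.arange(1, n_bar + 1))
+     beta = (n - 0.5) ** 2 + (i - 0.5) ** 2 + d_h**2 / delta**2
+     s = np.sum((n - 0.5) ** 2 / beta**2)
+     return 2 * k_e * delta**2 / (4 * (2 * k_e + 1) * s)
+
+ d = np.linspace(0.5, 15.0, 2000)
+ vals = np.array([crb_square(x) for x in d])
+ print("second differences all positive:", np.all(np.diff(vals, 2) > 0))
+ ```
+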
+ To obtain an insightful understanding of the optimal choice of $\Delta$, the special case with $N = 4$ is focused on in the following. We note that this special case is important in practice, given that using a small number of antennas helps to reduce the system overhead. For the case with $N = 4$, the CRLB can be simplified as follows:
+
+ $$
+ \mathrm{CRB}_m = \frac{2K_E \Delta^2}{(2K_E+1)} \left( \frac{1}{2} + \frac{d_{\mathrm{H}}^2}{\Delta^2} \right)^2, \tag{13}
+ $$
+
+ whose first-order derivative is given by
+
+ $$
+ \frac{\partial \mathrm{CRB}_m}{\partial \Delta} = \frac{4K_E}{(2K_E+1)} \left( \frac{\Delta}{2} + \frac{d_{\mathrm{H}}^2}{\Delta} \right) \left( \frac{1}{2} - \frac{d_{\mathrm{H}}^2}{\Delta^2} \right). \tag{14}
+ $$
+
+ The second-order derivative of $\mathrm{CRB}_m$ is given by
+
+ $$
+ \frac{\partial^2 \mathrm{CRB}_m}{\partial \Delta^2} = \frac{4K_E}{(2K_E+1)} \left( \frac{1}{4} + 3\frac{d_{\mathrm{H}}^4}{\Delta^4} \right) > 0, \tag{15}
+ $$
+
+ which means that $\mathrm{CRB}_m$ is a convex function of $\Delta$. Therefore, the optimal solution of $\Delta$ for minimizing the CRLB in the special case with $N = 4$ is given by
+
+ $$
+ \Delta^* = \sqrt{2}\, d_{\mathrm{H}}. \tag{16}
+ $$
+
+ Remark 2: An intuitive guess is that the CRLB is minimized if all the antennas are placed as close to the user as possible, i.e., $\Delta^* \to 0$ (or $\frac{\lambda}{2}$ to avoid antenna coupling). Eq. (16) shows that this intuition is wrong: the optimal antenna spacing is a function of the height of the waveguides.
+
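+ As a quick check of (13)-(16), the closed form in (13) can be minimized numerically and compared against $\sqrt{2} d_{\mathrm{H}}$; this is a sketch under the square-placement assumption above, with illustrative parameter values.
+
+ ```python
+ import numpy as np
+
+ d_h, k_e = 3.0, 0.01
+
+ def crb_n4(delta):
+     # Eq. (13): N = 4 antennas at the corners of a square of side delta around the user
+     return 2 * k_e / (2 * k_e + 1) * delta**2 * (0.5 + d_h**2 / delta**2) ** 2
+
+ deltas = np.linspace(0.1, 20.0, 100_000)
+ print("numerical minimizer:", deltas[np.argmin(crb_n4(deltas))])  # ~4.2426 m
+ print("sqrt(2) * d_H      :", np.sqrt(2) * d_h)
+ ```
+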
+ 3) Local-Maximum Property of CRLB: In the proximity of each pinching antenna, $\psi_n^{\mathrm{Pin}}$, there exists a local maximum of the $\mathrm{CRB}_m$ shown in (8). This local-maximum property can be revealed by studying $\frac{\partial \mathrm{CRB}_m}{\partial x_m}$ and $\frac{\partial \mathrm{CRB}_m}{\partial y_m}$. Without loss of generality, the focus is on $\frac{\partial \mathrm{CRB}_m}{\partial x_m}$, which can be expressed as follows:
+
+ $$
+ \frac{\partial \mathrm{CRB}_m}{\partial x_m} = \frac{K_E}{(2K_E+1)} \left( -\frac{1}{\gamma_1^2} \left[ \gamma_2 - \gamma_3 \right] + \frac{1}{\gamma_4^2} \gamma_5 \right), \tag{17}
+ $$
+
+ where $d_{mn}^2 = \left(x_m - x_n^{\mathrm{Pin}}\right)^2 + \left(y_m - y_n^{\mathrm{Pin}}\right)^2 + d_{\mathrm{H}}^2$, $\gamma_1 = \sum_{n=1}^{N} \frac{\left(x_m - x_n^{\mathrm{Pin}}\right)^2}{d_{mn}^4}$, $\gamma_2 = \sum_{n=1}^{N} \frac{2\left(x_m - x_n^{\mathrm{Pin}}\right)}{d_{mn}^4}$, $\gamma_3 = \sum_{n=1}^{N} \frac{4\left(x_m - x_n^{\mathrm{Pin}}\right)^3}{d_{mn}^6}$, $\gamma_4 = \sum_{n=1}^{N} \frac{\left(y_m - y_n^{\mathrm{Pin}}\right)^2}{d_{mn}^4}$, and $\gamma_5 = \sum_{n=1}^{N} \frac{4\left(x_m - x_n^{\mathrm{Pin}}\right)\left(y_m - y_n^{\mathrm{Pin}}\right)^2}{d_{mn}^6}$.
+
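+ The expression in (17) can be validated against a central finite difference of (8); the sketch below (illustrative layout, hypothetical names) implements the $\gamma$ terms exactly as defined above.
+
+ ```python
+ import numpy as np
+
+ d_h, k_e = 3.0, 0.01
+ c = k_e / (2 * k_e + 1)
+ pin = np.stack(np.meshgrid(np.linspace(-20.0, 20.0, 10),
+                            np.array([-5.0, 5.0])), axis=-1).reshape(-1, 2)
+
+ def crb(u):
+     dx, dy = u[0] - pin[:, 0], u[1] - pin[:, 1]
+     d4 = (dx**2 + dy**2 + d_h**2) ** 2
+     return c * (1 / np.sum(dx**2 / d4) + 1 / np.sum(dy**2 / d4))
+
+ def dcrb_dx(u):
+     dx, dy = u[0] - pin[:, 0], u[1] - pin[:, 1]
+     d2 = dx**2 + dy**2 + d_h**2
+     g1 = np.sum(dx**2 / d2**2)
+     g2 = np.sum(2 * dx / d2**2)
+     g3 = np.sum(4 * dx**3 / d2**3)
+     g4 = np.sum(dy**2 / d2**2)
+     g5 = np.sum(4 * dx * dy**2 / d2**3)
+     return c * (-(g2 - g3) / g1**2 + g5 / g4**2)  # Eq. (17)
+
+ u, h = np.array([7.3, 1.8]), 1e-6
+ print(dcrb_dx(u))
+ print((crb(u + [h, 0.0]) - crb(u - [h, 0.0])) / (2 * h))  # should agree closely
+ ```
+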
+ Without loss of generality, assume that $\mathrm{U}_m$ is in the proximity of the first pinching antenna on the first waveguide, i.e., $x_m = x_{11}^{\mathrm{Pin}} + \delta_x$ and $y_m = y_{11}^{\mathrm{Pin}} + \delta_y$, where $\delta_x \to 0$ and $\delta_y \to 0$. In this case, $\gamma_1$ in (17) can be approximated as follows:
+
+ $$
+ \gamma_1 \approx \sum_{i=1}^{\frac{N}{\tilde{N}}} \sum_{n=1}^{\tilde{N}-1} \frac{n^2 \Delta_x^2}{\left(n^2 \Delta_x^2 + (i-1)^2 \Delta_y^2 + d_{\mathrm{H}}^2\right)^2}, \tag{18}
+ $$
+
+ where the terms at the order of $\delta_x^2$ are omitted. Similarly, by omitting the terms at the order of $\delta_x^2$, $\gamma_2$ can be approximated as follows:
+
+ $$
+ \gamma_2 \approx \sum_{i=1}^{\frac{N}{\tilde{N}}} \frac{2\delta_x}{\left(\delta_x^2 + (i-1)^2 \Delta_y^2 + d_{\mathrm{H}}^2\right)^2} - \sum_{i=1}^{\frac{N}{\tilde{N}}} \sum_{n=1}^{\tilde{N}-1} \frac{2n\Delta_x}{\left(n^2 \Delta_x^2 + (i-1)^2 \Delta_y^2 + d_{\mathrm{H}}^2\right)^2}. \tag{19}
+ $$
+
+ Similarly, $\gamma_3$, $\gamma_4$ and $\gamma_5$ can be approximated as follows:
+
+ $$
+ \gamma_3 \approx -\sum_{i=1}^{\frac{N}{\tilde{N}}} \sum_{n=1}^{\tilde{N}-1} \frac{4n^3 \Delta_x^3}{\left(n^2 \Delta_x^2 + (i-1)^2 \Delta_y^2 + d_{\mathrm{H}}^2\right)^3}, \tag{20}
+ $$
+
+ $$
+ \gamma_4 \approx \sum_{i=1}^{\frac{N}{\tilde{N}}-1} \sum_{n=1}^{\tilde{N}} \frac{i^2 \Delta_y^2}{\left((n-1)^2 \Delta_x^2 + i^2 \Delta_y^2 + d_{\mathrm{H}}^2\right)^2}, \tag{21}
+ $$
+
+ $$
+ \gamma_5 \approx -\sum_{i=1}^{\frac{N}{\tilde{N}}-1} \sum_{n=1}^{\tilde{N}-1} \frac{4n\Delta_x i^2 \Delta_y^2}{\left(n^2 \Delta_x^2 + i^2 \Delta_y^2 + d_{\mathrm{H}}^2\right)^3}. \tag{22}
+ $$
+
+ To facilitate the analysis of this local-maximum property of the CRLB, assume that $\Delta_x = \Delta_y = \Delta \gg d_{\mathrm{H}}$ and $\tilde{N} = \frac{N}{\tilde{N}}$, i.e., a square antenna grid, which means that $\gamma_1 = \gamma_4$. Hence, the derivative of the CRLB can be simplified as follows:
+
+ $$
+ \begin{array}{l} \frac{\partial \mathrm{CRB}_m}{\partial x_m} \approx \frac{K_E}{(2K_E+1)\gamma_1^2} \left[ -\sum_{i=1}^{\frac{N}{\tilde{N}}} \frac{2\delta_x}{\left(\delta_x^2 + (i-1)^2 \Delta^2 + d_{\mathrm{H}}^2\right)^2} \right. \\ \left. + \sum_{i=1}^{\frac{N}{\tilde{N}}} \sum_{n=1}^{\tilde{N}-1} \frac{2n\Delta}{\bar{\beta}_{ni}^2} - \sum_{i=1}^{\frac{N}{\tilde{N}}} \sum_{n=1}^{\tilde{N}-1} \frac{4n^3\Delta^3}{\bar{\beta}_{ni}^3} - \sum_{i=1}^{\frac{N}{\tilde{N}}-1} \sum_{n=1}^{\tilde{N}-1} \frac{4n^3\Delta^3}{\bar{\beta}_{ni}^3} \right], \end{array}
+ $$
+
+ where $\bar{\beta}_{ni} = \left(n^2 + (i-1)^2\right)\Delta^2 + d_{\mathrm{H}}^2$.
+
+ Note that if $i = \frac{N}{\tilde{N}}$, $\sum_{n=1}^{\tilde{N}-1} \frac{4n^3\Delta^3}{\left(\left(n^2+(i-1)^2\right)\Delta^2+d_{\mathrm{H}}^2\right)^3}$ is an insignificant term, which means that the last two sums can be merged and the derivative of the CRLB can be further simplified as follows:
+
+ $$
+ \begin{array}{l} \frac{\partial \mathrm{CRB}_m}{\partial x_m} \approx \frac{K_E}{(2K_E+1)\gamma_1^2} \left[ -\sum_{i=1}^{\frac{N}{\tilde{N}}} \frac{2\delta_x}{\left(\delta_x^2 + (i-1)^2 \Delta^2 + d_{\mathrm{H}}^2\right)^2} \right. \\ \left. + 2\Delta \sum_{i=1}^{\frac{N}{\tilde{N}}} \sum_{n=1}^{\tilde{N}-1} \frac{n\left(\left((i-1)^2 - 3n^2\right)\Delta^2 + d_{\mathrm{H}}^2\right)}{\left(\left(n^2 + (i-1)^2\right)\Delta^2 + d_{\mathrm{H}}^2\right)^3} \right]. \tag{23} \end{array}
+ $$
+
+ For the case with $\delta_x = 0$, i.e., the user is located directly underneath the pinching antenna at $\psi_{11}^{\mathrm{Pin}}$, by using the assumption that $\Delta \gg d_{\mathrm{H}}$, the derivative of the CRLB can be expressed as follows:
+
+ $$
+ \frac{\partial \mathrm{CRB}_m}{\partial x_m} \approx \frac{K_E}{(2K_E+1)\gamma_1^2} \frac{2}{\Delta^3} \gamma_6, \tag{24}
+ $$
+
+ where $\gamma_6 = \sum_{i=1}^{\frac{N}{\tilde{N}}} \sum_{n=1}^{\tilde{N}-1} \frac{(i-1)^2 - 3n^2}{\left(n^2 + (i-1)^2\right)^3}$. We note that the terms of $\gamma_6$ decay rapidly as $n$ and $i$ increase, i.e., $\gamma_6$ can be approximated by keeping the dominant negative term ($n = 1$ and $i = 1$) and the dominant positive term ($n = 1$ and $i = 3$), i.e., $\gamma_6 \approx -3 + \frac{1}{125}$, which means that $\frac{\partial \mathrm{CRB}_m}{\partial x_m} \leq 0$ for the case with $\delta_x = 0$. For the case of $\delta_x \neq 0$, the derivative of the CRLB can be approximated as follows:
+
+ $$
+ \frac{\partial \mathrm{CRB}_m}{\partial x_m} \approx \frac{K_E}{(2K_E+1)\gamma_1^2} \left[ -\frac{2\delta_x}{d_{\mathrm{H}}^4} + \frac{2}{\Delta^3} \gamma_6 \right].
+ $$
+
+ Due to the assumption of $\Delta \gg d_{\mathrm{H}}$, the term $\frac{2\delta_x}{d_{\mathrm{H}}^4}$ is dominant, and hence $\frac{\partial \mathrm{CRB}_m}{\partial x_m} > 0$ if $\delta_x < 0$. In summary, $\frac{\partial \mathrm{CRB}_m}{\partial x_m} < 0$ if the user is located at $(x_{11}^{\mathrm{Pin}}, y_{11}^{\mathrm{Pin}}, 0)$, and $\frac{\partial \mathrm{CRB}_m}{\partial x_m} > 0$ if the user is located at $(x_{11}^{\mathrm{Pin}} + \delta_x, y_{11}^{\mathrm{Pin}} + \delta_y, 0)$ with $\delta_x < 0$. A similar conclusion can be established for $\frac{\partial \mathrm{CRB}_m}{\partial y_m}$, which means that there exists a local maximum of the CRLB around $\psi_n^{\mathrm{Pin}}$.
+
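+ The local-maximum behavior can also be observed numerically. The sketch below (illustrative layout matching the setup of Fig. 2(b): $N = 20$, $N_{\mathrm{WG}} = 2$, $d_{\mathrm{H}} = 3$ m) prints the CRLB along the waveguide direction around the ground projection of one pinching antenna, where a local peak is expected per the analysis above.
+
+ ```python
+ import numpy as np
+
+ d_h, k_e = 3.0, 0.01
+ xs = np.linspace(-20.0, 20.0, 10)
+ pin = np.stack(np.meshgrid(xs, np.array([-5.0, 5.0])), axis=-1).reshape(-1, 2)
+
+ def crb(u):
+     dx, dy = u[0] - pin[:, 0], u[1] - pin[:, 1]
+     d4 = (dx**2 + dy**2 + d_h**2) ** 2
+     return k_e / (2 * k_e + 1) * (1 / np.sum(dx**2 / d4) + 1 / np.sum(dy**2 / d4))
+
+ x0, y0 = xs[4], 5.0                    # ground projection of one pinching antenna
+ for off in np.linspace(-2.0, 2.0, 9):  # scan along the waveguide (x) direction
+     print(f"x0 {off:+.2f} m -> CRB = {crb(np.array([x0 + off, y0])):.4g}")
+ ```
+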
+ Remark 3: The local-maximum property of the CRLB indicates an interesting conflict between the communication and sensing functionalities of pinching antennas. In particular, placing a pinching antenna directly above the user can increase the user's data rate but degrade its positioning accuracy. In other words, this local-maximum property reveals the importance of antenna placement in pinching-antenna assisted ISAC networks.
+
+ # IV. NUMERICAL STUDIES
+
+ In this section, computer simulation results are presented to demonstrate the impact of pinching antennas on the positioning accuracy, where $K_E = 0.01$, $D_{\mathrm{W}} = 10 \mathrm{~m}$ and $D_{\mathrm{L}} = 40 \mathrm{~m}$, unless stated otherwise.
+
+ ![](images/de2fd461e68d196df1294074a08a6e8f1ae03b5be4657fc9a3ee1fe1bcd21315.jpg)
+ Fig. 1. Averaged CRLBs, $\mathrm{CRB}_m$, achieved by the considered antenna systems, where $N_{\mathrm{WG}} = 2$ and $d_{\mathrm{H}} = 3 \mathrm{~m}$. For the pinching-antenna system, on each waveguide, there are $\frac{N}{N_{\mathrm{WG}}}$ antennas, which are equally spaced. Due to the singularity issue experienced by conventional antennas discussed in Section III-B1, users are assumed to be excluded from a square area with its side being $a$ and its center at the origin.
+
+ ![](images/8c86d7c82be13cfdb700185fedbcfc043903bcacc188a31c96bd78c0d022b95d.jpg)
+ (a) Conventional Antennas
+
+ ![](images/1471639b0f119a70aea449f2c23ef35cea4d5de252869d12349b3c54fafbf1c3.jpg)
+ (b) Pinching Antennas
+ Fig. 2. CRLBs achieved by the considered antenna systems. $N = 20$, $N_{\mathrm{WG}} = 2$ and $d_{\mathrm{H}} = 3 \mathrm{~m}$. On each waveguide, there are $\frac{N}{N_{\mathrm{WG}}}$ antennas, which are equally spaced.
+
+ In Fig. 1, the averaged CRLBs achieved by the conventional and pinching-antenna systems are shown as functions of the number of antennas, where $\mathrm{U}_m$ is assumed to be uniformly deployed in the service area. Because the conventional-antenna system suffers from the singularity issue discussed in Section III-B1, it is assumed that $\mathrm{U}_m$ cannot be located in a square area with its side being $a$ and its center at the origin. As can be seen from Fig. 1, the use of pinching antennas yields a significant performance gain over conventional antennas, regardless of the choices of $N$ and $a$.
+
+ Fig. 2 is provided to highlight the fact that a user's positioning accuracy depends on its location. On the one hand, Fig. 2(a) shows that for conventional antennas, a user can experience extremely poor positioning accuracy if it is located far away from the center of the service area, which confirms the analytical results shown in (10). On the other hand, Fig. 2(b) shows that the use of pinching antennas ensures reasonably accurate positioning, regardless of whether the user is at the center or the edge of the service area. This also means that in the multi-user scenario, using pinching antennas can ensure fairness in the users' positioning accuracy. We note that in Fig. 2(b), local maxima are clearly visible in the proximity of the pinching antennas, which confirms the analysis shown in Section III-B3.
+
+ ![](images/214063bb72dbd9a11d4eb4e79e473fdc96c3ca35a008baf61c81991492ed251a.jpg)
+ (a) Positioning with a focal point at $\left(-\frac{D_{\mathrm{L}}}{4},0,0\right)$
+
+ ![](images/5959a4483a3d708e1fda07cb15e0ba8ae7a2653fca446613aace2299d7205a6c.jpg)
+ (b) Positioning with a focal point at $\left(\frac{D_{\mathrm{L}}}{4},0,0\right)$
+ Fig. 3. Using pinching antennas to achieve flexible user-centric positioning. $N = 20$, $N_{\mathrm{WG}} = 2$ and $d_{\mathrm{H}} = 3 \mathrm{~m}$. On each waveguide, there are $\frac{N}{N_{\mathrm{WG}}}$ antennas, which are equally spaced in a segment with its length being $\frac{D_{\mathrm{L}}}{2}$ and its center at the focal points shown in the figures.
+
+ Recall that one key feature of pinching antennas is their reconfiguration capabilities, where the number and the locations of the antennas can be changed in a flexible manner. Fig. 3 demonstrates how this reconfiguration feature can be used to achieve flexible user-centric positioning. In particular, Figs. 3(a) and 3(b) show that by activating the pinching antennas close to the intended user locations, different focal points can be realized, which means that users close to these focal points can enjoy high positioning accuracy. For the case where the pinching antennas are clustered close to a user, Fig. 4 is provided to show the impact of the antenna spacing on the CRLB, where the accuracy of the analytical results developed in (16) is also verified.
+
+ ![](images/95e52804193e9fcc17868d063866468da9677490c21f4494ad8893fb6d18da17.jpg)
+ Fig. 4. Impact of the antenna spacing on the CRLB. $N = 4$ pinching antennas are activated in a square-shaped area with the antenna spacing being $\Delta$ and $\mathrm{U}_m$ located at the center of the area, where $N_{\mathrm{WG}} = 2$. The analytical results are based on (16).
+
+ # V. CONCLUSIONS
+
+ This letter investigated how the key features of pinching antennas can be used to support ISAC from the CRLB perspective. In particular, the CRLB achieved by pinching antennas was first derived and then compared to that of conventional antennas. The presented analytical and simulation results demonstrated that the use of pinching antennas can significantly reduce the CRLB and, hence, enhance the sensing capability. In addition, this letter showed that the low-cost and reconfigurability features of pinching antennas can be utilized to realize flexible user-centric positioning.
+
data/2025/2504_05xxx/2504.05792/images/01ea50d42286274380a45111164366e7d2a8affde9e260718f721bf2d384510e.jpg ADDED

Git LFS Details

  • SHA256: f945eaaacfc3d649fdd2d89dde6d017f179ffa38937b2da124bff7f60a90ed85
  • Pointer size: 130 Bytes
  • Size of remote file: 14.4 kB
data/2025/2504_05xxx/2504.05792/images/0b193879da9018fe9108b7b97eb52ae05825c2c4ca59ee37a9665be50f9dd931.jpg ADDED

Git LFS Details

  • SHA256: 506dcf2c6f94d528f29d08e1e6f93d4b386e8d03f60053ccc485e4e238fe371c
  • Pointer size: 130 Bytes
  • Size of remote file: 10.9 kB
data/2025/2504_05xxx/2504.05792/images/14299fc635244dedb6a0531d4d4b05a4d9af566abd191463f2ee7da4fce95e15.jpg ADDED

Git LFS Details

  • SHA256: cd58dd95799da7da75bf34a770ccb5802ecc145d3a551e33b11ece0f8238b657
  • Pointer size: 129 Bytes
  • Size of remote file: 6.53 kB
data/2025/2504_05xxx/2504.05792/images/1471639b0f119a70aea449f2c23ef35cea4d5de252869d12349b3c54fafbf1c3.jpg ADDED

Git LFS Details

  • SHA256: aa1f32b41b1f75fd0b2acea2df906a18b45f3a4421e806f3807b7b42e2bc3495
  • Pointer size: 130 Bytes
  • Size of remote file: 13.4 kB
data/2025/2504_05xxx/2504.05792/images/151d1b9a99db6dd96408c9f3ba705c7a8bdad63c519ff3859798eac266f15097.jpg ADDED

Git LFS Details

  • SHA256: a9517c0e708f2de3ece914d741afe99f273780747722cf4a0f6816ce6e37e239
  • Pointer size: 130 Bytes
  • Size of remote file: 19.8 kB
data/2025/2504_05xxx/2504.05792/images/1dccd3cab821d5c1eaf6b5880ef6b79971c23a5b46324e9c72aad3ff00c1e95a.jpg ADDED

Git LFS Details

  • SHA256: f4da7937bcd846bba6d0299e15ea40c5157ce4fbd93380d215b60cdb298d3062
  • Pointer size: 129 Bytes
  • Size of remote file: 8.82 kB
data/2025/2504_05xxx/2504.05792/images/214063bb72dbd9a11d4eb4e79e473fdc96c3ca35a008baf61c81991492ed251a.jpg ADDED

Git LFS Details

  • SHA256: ec0cd4e6f01708c91928ed2aa7a6e23d4887241426f1243924098f7ec74904bc
  • Pointer size: 130 Bytes
  • Size of remote file: 13.8 kB
data/2025/2504_05xxx/2504.05792/images/247c87c8019459213bfe0ce6435a498ac3d201e36b5445abfef7c87b74001d6d.jpg ADDED

Git LFS Details

  • SHA256: 8b5050c05d3d6fffa073256eaba17454d4132539cbe09a2a37edd14a9644ec06
  • Pointer size: 130 Bytes
  • Size of remote file: 21.3 kB
data/2025/2504_05xxx/2504.05792/images/37093a2ca762a3380380c7f8cf3e0bb02c3ef35903771b9c7dd9cbf571d85434.jpg ADDED

Git LFS Details

  • SHA256: 1d5152a27e2ff50247823e7a806ec3cef71cf02f0ebc1898b8606b306cb0455b
  • Pointer size: 129 Bytes
  • Size of remote file: 9.27 kB
data/2025/2504_05xxx/2504.05792/images/44402e915e3a77623794fffb1854bee176c10134d19af786ec61562a51c00f68.jpg ADDED

Git LFS Details

  • SHA256: 251b780fd8d5d7aad1a29cadb396fadcf6daab6388d5114991ea9d946afdeadd
  • Pointer size: 129 Bytes
  • Size of remote file: 6.61 kB
data/2025/2504_05xxx/2504.05792/images/486c8e561be8deccf96b5bf141f51114caaddd2b6a1e8ef24fb805f7283bf4b4.jpg ADDED

Git LFS Details

  • SHA256: 7f915d279b1546ee33ca8c80f973c585d484f619d9c49e07d4a59623539ec90c
  • Pointer size: 129 Bytes
  • Size of remote file: 7.46 kB
data/2025/2504_05xxx/2504.05792/images/547d79cbc78526a73c650c0b1ea306da82b683b588de1e662784af6b3e9448c8.jpg ADDED

Git LFS Details

  • SHA256: 8499929325f913ff86662ced6ad81634077db887126b7b1e9a960cc38151434b
  • Pointer size: 129 Bytes
  • Size of remote file: 8.09 kB
data/2025/2504_05xxx/2504.05792/images/5959a4483a3d708e1fda07cb15e0ba8ae7a2653fca446613aace2299d7205a6c.jpg ADDED

Git LFS Details

  • SHA256: 57e50f83fbea961e0687a1cc914e423f944cc2da492269cb25ac82ff04004161
  • Pointer size: 130 Bytes
  • Size of remote file: 13.8 kB
data/2025/2504_05xxx/2504.05792/images/5a1798459fbd0aa32c927f7230e28a18dc94758ab47ca58020471c74159cf48b.jpg ADDED

Git LFS Details

  • SHA256: a126e3df359a2882693973352e868765cd4b35c0316e823bc66d3b610b4de60b
  • Pointer size: 130 Bytes
  • Size of remote file: 10.8 kB