MinerU Batch 6c82c03b-0a6e-4e43-9cba-36090a56f72c (Part 2/8)
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +8 -0
- data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_content_list.json +0 -0
- data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_model.json +0 -0
- data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_origin.pdf +3 -0
- data/2025/2503_09xxx/2503.09089/full.md +504 -0
- data/2025/2503_09xxx/2503.09089/images/003dcde246a439f5b9b36cc33df7c37daaa5e4eaf0478ed30be80b30cbe72965.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/2976dea7e0ff07dfee13b430fc8f8efa94af68a9c7230eb416ed229809e5c751.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/3501fac23dcf5fe773840a66ad8f7737fdc4bb7268e280936b833deb06905a3d.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/3ee9190f79ad7623fb4b180523bd53a7f66d2934c043f7c4febdadae07f9b9c0.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/40113cdec1e1feca7000cb77d3a5294a6135d63a0d343288699aecc05a7b7a58.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/503072355158697ea6cb6ac7e2712ec0ffc412f1cd70207465e4cb36e21f3c9b.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/594e4e848668151fa0f0d585dcf3a37fcc51677d8a82c5fdceaf97947d73c7bb.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/65aa2b242676fd4ce7ba9e60ceadb537140f50dff384a8c6651710edad591512.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/68d78298f4ef4a0462b402972796e7a35e59e5ec5b78ccafd6cd08b74dd8ad0d.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/6c9737e3fa48ebc0fa6a563806676ce4a098be8490a88f39f510258c9a12539f.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/86b464c56ef20eb2e4a58b1077f46f88aa113910e37ea427df9e03b94670489d.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/ad9888427e4959ea8df866a9f22f434e24d97aa01a98851cc1f0d57d84fed76f.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/c20269c8cebd98330e89168b5cf72fa71b25a1845eac8027149a6e7bbe018c9f.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/ca58188c6fe4c2ffec34aeccdb6ee5a73bdf63c458fea192f02d6865ca76db6a.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/cbbf57fce918f5d0e7521a35c3e24e9a0612bad18584b2403a531e9d40562501.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/cd90b427caeb94e68300cb0963a3939e90034375ecc1b70e22bb7ba12b53914b.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/d58f8658622664aa6fdd9f0ba4233e824e52755bf040929bc8346fe186a5d5e3.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/dd0a043ec99e4aa7d3cdd9cab21f0f8c8ff70d258cc383c3b7961f81be3880dc.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/dffede0cb6e1140d33ef35874b6f55be329c72cec08ada3fd8a7e8684a261136.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/e3da4d4339e99b1a14d6f8d73ea975c643cc44ad95a8dc1803fff42294d4f99a.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/eff607d6667dc8fa01afca421fe5518165076b8c38bf5f9855a1411d560992d1.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/f3ff500f60d006d9ab2100b2e34cdaaed024cc944a6d5f1861937bb334d1a3cf.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/images/f929dd53f4adf74c47eab0524e9c10d9df2c8753a81e68bd756d7c86d5d4876b.jpg +3 -0
- data/2025/2503_09xxx/2503.09089/layout.json +0 -0
- data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_content_list.json +1213 -0
- data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_model.json +1639 -0
- data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_origin.pdf +3 -0
- data/2025/2503_09xxx/2503.09198/full.md +222 -0
- data/2025/2503_09xxx/2503.09198/images/0d4e22f04d3ea25761125c93fc6d70fe856bf840ef353c043f32d21c9633724e.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/0e41ba0aeddc8af5f2b6664c64abea63f9b32bfd4692c38f916d07f501d537e5.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/23944f9963b290146f2ac445e8970545bcf00b2949e999067a13468653d749cc.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/2eb0e7b79700cbbd72a8a244992163f567dbbd68f9d3db6bad3e5808cda4dabb.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/2fcee6efa3b1d3c44144e2639f3857a15c4079f838c4f9132953fd427a8b1be9.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/389ef0f63a90171c14db5cd9a925792e376222c8c56a5801cacb3dcc99360c96.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/3cd8a951d8771e165427c2faca0d2508e4c8cef94fb244dc3a04f2ca031996a3.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/4050e8f862ac40be7bc6d5c239997192325936e244911b8d69fe60a4ab8810b8.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/416dbc943d118d6834e80c8c5ad759b34f940700b96fd0b87bb23c082f10cf10.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/5dabdfa6b0129921b2abb27785be9608573a25f26e4646d20c6b92250fad1414.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/69ae709df8988787ff641b5b6eb5a608ca0636bba7110a3a4735064168d117da.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/7c729f1706460b0bf0adde7a8dbd071f9ae77f611ef3f85b3d951fb62795016c.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/7eac2440a2fedd66d943c82b390c3a9df5d95d019d523a29c7a3e2f9c7218928.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/8c16ad7a59f34e5974a4322466cec0149f8905a1a1b1cfadff910a3fec50d004.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/92c09c0b767ea453098777786fb815e8c2f833a8abc790807397f42e9a6f6887.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/9434d9911bf9b1660b16b84b9672b9a68ac66eec53e61ed383a2c752f12025a2.jpg +3 -0
- data/2025/2503_09xxx/2503.09198/images/96bce3148486d00ea374e73aeb6caf0bb16a8ee9cc4e78a0b161de5fdfe93db0.jpg +3 -0
.gitattributes
CHANGED
|
@@ -1676,3 +1676,11 @@ data/2025/2503_09xxx/2503.09573/48dbe47b-de29-42fc-bf05-7830a092a51d_origin.pdf
|
|
| 1676 |
data/2025/2503_09xxx/2503.09594/1a9d7e5b-676e-4927-af63-f7f6355fd077_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1677 |
data/2025/2503_09xxx/2503.09780/08214423-a242-4bd0-9a40-e8d6d3d1934d_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1678 |
data/2025/2503_09xxx/2503.09799/13deeb11-b9f7-4b0d-afd2-8ce11506db61_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1676 |
data/2025/2503_09xxx/2503.09594/1a9d7e5b-676e-4927-af63-f7f6355fd077_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1677 |
data/2025/2503_09xxx/2503.09780/08214423-a242-4bd0-9a40-e8d6d3d1934d_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1678 |
data/2025/2503_09xxx/2503.09799/13deeb11-b9f7-4b0d-afd2-8ce11506db61_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1679 |
+
data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1680 |
+
data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1681 |
+
data/2025/2503_09xxx/2503.09277/39fed2ad-9645-4fde-a1c3-86b0c99b7b36_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1682 |
+
data/2025/2503_09xxx/2503.09501/7c196e4e-1362-4974-a470-65c83d863927_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1683 |
+
data/2025/2503_09xxx/2503.09567/17e53201-29b3-43fd-8f2e-78d7b00a58a6_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1684 |
+
data/2025/2503_09xxx/2503.09595/b59876e9-da8b-438b-ab54-bb4c4d76820f_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1685 |
+
data/2025/2503_11xxx/2503.11701/f561bad2-8e9b-4fb7-9083-b32d2bfd8f1f_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1686 |
+
data/2025/2503_13xxx/2503.13502/db12ade8-3943-4647-bb0d-ce8160710750_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2503_09xxx/2503.09089/c2c2c95f-facf-4f82-977c-1820c00d4eb0_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1fcd0ff4b8f624de14b00fe1f8de43f88471336de8b18bb06c84278e17c1d2e4
|
| 3 |
+
size 777937
|
data/2025/2503_09xxx/2503.09089/full.md
ADDED
|
@@ -0,0 +1,504 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LocAgent: Graph-Guided LLM Agents for Code Localization
|
| 2 |
+
|
| 3 |
+
Zhaoling Chen\*, Xiangru Tang\*, Gangda Deng\*, Fang Wu\*, Jialong Wu\*, Zhiwei Jiang, Viktor Prasanna\*, Arman Cohan\*, Xingyao Wang
|
| 4 |
+
|
| 5 |
+
$^{\spadesuit}$ Yale University $^{\spadesuit}$ University of Southern California $^{\spadesuit}$ Stanford University $^{\spadesuit}$ All Hands AI xiangru.tang@yale.edu, gangdade@usc.edu, xingyao@all-hands.dev
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Code localization—identifying precisely where in a codebase changes need to be made—is a fundamental yet challenging task in software maintenance. Existing approaches struggle to efficiently navigate complex codebases when identifying relevant code sections. The challenge lies in bridging natural language problem descriptions with the appropriate code elements, often requiring reasoning across hierarchical structures and multiple dependencies. We introduce LOCAGENT, a framework that addresses code localization through graph-based representation. By parsing codebases into directed heterogeneous graphs, LOCAGENT creates a lightweight representation that captures code structures (files, classes, functions) and their dependencies (imports, invocations, inheritance), enabling LLM agents to effectively search and locate relevant entities through powerful multi-hop reasoning. Experimental results on real-world benchmarks demonstrate that our approach significantly enhances accuracy in code localization. Notably, our method with the fine-tuned Qwen-2.5-Coder-Instruct-32B model achieves comparable results to SOTA proprietary models at greatly reduced cost (approximately $86\%$ reduction), reaching up to $92.7\%$ accuracy on file-level localization while improving downstream GitHub issue resolution success rates by $12\%$ for multiple attempts (Pass@10). Our code is available at https://github.com/gersteinlab/LocAgent.
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
Code localization can be viewed as an information retrieval (IR) task that aims to identify relevant code snippets given natural language descriptions (Yu et al., 2025; Yang et al., 2024; Xia et al., 2024). Developers spend up to $66\%$ of their debugging time (Böhme et al., 2017) understanding code to
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
Figure 1: Code localization across four common programming scenarios. Given a codebase and an issue description, the goal of code localization is to identify the relevant code snippets that require modification to resolve the issue.
|
| 17 |
+
|
| 18 |
+
make changes, and automated tools often struggle with the same challenge. Poor code localization leads to incomplete fixes, introduces new bugs, and significantly extends development cycles. Unlike traditional retrieval tasks that primarily focus on lexical or semantic matching between queries and documents (Guo et al., 2016, 2020), code localization requires bridging the gap between natural language and programming languages. It also necessitates reasoning capabilities to analyze the issue, while considering the structural and semantic properties of code (Lewis et al., 2020; Guu et al., 2020; Qu et al., 2020). This capability has become fundamental to powerful AI assistants (OpenAI, 2023; Anthropic, 2023), code-aware search engines (PerplexityAI, 2023), and automated programming agents (Cognition.ai, 2024; Wang et al., 2025; Gauthier, 2024). In particular, accurate code localization is crucial for software maintenance and evolution, as it enables precise code modifications for bug fixes, refactoring, and feature additions (Wang et al., 2024), thereby streamlining the development workflow.
|
| 19 |
+
|
| 20 |
+
Existing approaches to code localization face
|
| 21 |
+
|
| 22 |
+
significant limitations. Dense retrieval methods require maintaining and continuously updating vector representations of the entire codebase (Wang et al., 2023b; Günther et al., 2023), creating engineering challenges for large, evolving repositories where code changes frequently. While LLMs demonstrate strong code understanding capabilities (Kang et al., 2023; Wu et al., 2023), models with large context windows cannot process entire codebases at once, necessitating strategic navigation through relevant parts. Moreover, issue descriptions often mention only symptoms rather than underlying causes. For instance, a report of 'XSS vulnerability in user profile' might require changes to a shared validation utility used throughout the codebase but not explicitly referenced in the issue. This disconnect between issue descriptions and affected code components presents a substantial challenge for traditional retrieval approaches, which struggle to trace implicit dependencies across the codebase structure. Recent agent-based methods attempt to address these limitations through iterative exploration (Yang et al., 2024; Qin et al., 2024) but still struggle to efficiently navigate and comprehend complex code structures and dependencies, particularly when multi-hop reasoning is required to trace from issue descriptions to affected code regions that aren't directly mentioned.
|
| 23 |
+
|
| 24 |
+
This raises a key question: How can we design efficient indexing as intermediate representations that are structure-aware and both easy and performant for LLM agents to consume? It is intuitive to design an agentic retrieval system that carefully combines traditional IR methods and LLM agent's reasoning ability to achieve accurate, efficient, and cost-effective code localization in codebases.
|
| 25 |
+
|
| 26 |
+
To address this challenge, we propose LOCAGENT, a framework that builds directed heterogeneous graph indexing to unify code structures, dependencies, and contents. Our approach leverages a structured graph representation that enables powerful multi-hop reasoning capabilities, allowing agents to navigate complex dependency relationships between code elements even when target code isn't explicitly mentioned in issue descriptions. This graph-based approach significantly outperforms previous methods on challenging localization tasks that require traversing multiple code relationships. Our lightweight representation, coupled with sparse indexing techniques, enables efficient entity search while maintaining rich structural information. The indexing process typically
|
| 27 |
+
|
| 28 |
+
takes only a few seconds per codebase, making it highly practical for real-time use. The framework integrates a set of unified tools that guide the agent through a systematic exploration of the codebase, allowing autonomous navigation based on contextual needs. Furthermore, by fine-tuning Qwen-2.5-Coder-Instruct (Hui et al., 2024) 7B and 32B models (abbr. as Qwen-2.5-7B and Qwen-2.5-32B respectively), our system achieves performance comparable to state-of-the-art models like Claude-3-5-sonnet-20241022 (Anthropic, 2023) (abbr. as Claude-3.5) while significantly reducing API costs by over $80\%$ (from \$0.66 to \$0.09 per example), making it practical for real-world deployment.
|
| 29 |
+
|
| 30 |
+
Additionally, to facilitate a comprehensive evaluation of code localization methods, we introduce LOC-BENCH, a new benchmark specifically designed for this task. Existing benchmarks like SWE-Bench present significant limitations: (1) they risk contamination through data overlap with LLM training sets (Mündler et al., 2024), and (2) they primarily focus on bug fixing, lacking diversity in maintenance scenarios such as feature requests, performance optimizations, and security fixes. In contrast, LOC-BENCH covers diverse scenarios and mitigates potential contamination concerns by incorporating more recent examples from popular Python repositories collected after known LLM training cutoff dates. Additionally, we provide tooling to continuously update the benchmark with new examples, allowing researchers to maintain a fresh evaluation dataset as models evolve and training data cutoffs advance.
|
| 31 |
+
|
| 32 |
+
Our contributions address critical gaps in existing approaches:
|
| 33 |
+
|
| 34 |
+
- We introduce a heterogeneous graph representation that captures both explicit and implicit code relationships, enabling efficient multi-hop reasoning. Our lightweight graph-based indexing process takes only seconds per repository and requires minimal storage.
|
| 35 |
+
- We design unified tools for agent-based code exploration that leverage our graph representation, allowing LLM agents to perform complex multi-hop navigation and reasoning across code dependencies even when target code isn't explicitly mentioned in issue descriptions.
|
| 36 |
+
- We introduce Loc-Bench, a new benchmark
|
| 37 |
+
|
| 38 |
+
specifically designed for code localization that addresses limitations in existing datasets. Unlike previous benchmarks dominated by bug reports, Loc-Bench offers a balanced distribution across bug fixes, feature requests, security patches, and performance optimizations.
|
| 39 |
+
|
| 40 |
+
- By fine-tuning open-source models on this task, we reduce the cost of code localization by $86\%$ while maintaining competitive performance.
|
| 41 |
+
|
| 42 |
+
# 2 Related Work
|
| 43 |
+
|
| 44 |
+
# 2.1 Traditional Retrieval-based Methods
|
| 45 |
+
|
| 46 |
+
Traditional IR methods rely on lexical or semantic matching to return ranked lists of code snippets. Sparse retrievers, such as BM25 (Robertson et al., 1994, 2009), have demonstrated robustness to domain adaptation. Dense retrievers utilize embeddings for improved semantic searching, including models with open checkpoints such as general text embedding models E5-base-v2 (Wang et al., 2022) and proprietary APIs (VoyageAI, 2024). Code embedding models such as Jina-Code-v2 (Günther et al., 2023), Codesage-large-v2 (Zhang et al., 2024), and CodeRankEmbed (Suresh et al., 2024), trained specifically for code related tasks, showing significant performance in Code2Code and NL2Code semantic search tasks. However, while the embedding models themselves are small, the engineering challenges of maintaining these indexing systems (e.g., storage requirements, update mechanisms, and infrastructure maintenance) make them difficult to adapt to fast-evolving codebases.
|
| 47 |
+
|
| 48 |
+
# 2.2 LLM-based Generative Retrieval Methods
|
| 49 |
+
|
| 50 |
+
Recently, LLMs with advanced code reasoning capabilities have demonstrated superior performance by directly processing queries and raw code for code localization (Kang et al., 2023; Wu et al., 2023; Xia et al., 2024; Kang et al., 2024). For example, Agentless (Xia et al., 2024), initially designed for automated program repair, uses a simplistic hierarchical localization process powered by LLM. It employs a straightforward three-phase approach that first localizes relevant code sections before attempting to fix the identified issues, challenging the assumption that complex agent architectures are necessary for effective code understanding and modification tasks.
|
| 51 |
+
|
| 52 |
+
Expanding on these techniques, agent-based methods utilize multi-step reasoning to enable automated codebase traversal. Specifically, OpenHands (Wang et al., 2025) implements a generalist coding agent that supports bash commands like grep and tools for viewing files. SWE-Agent (Yang et al., 2024) integrates a custom Agent-Computer Interface to support agents to navigate entire repositories. MoatlessTools (Örwall, 2024) combines an agentic searching loop and semantic search to obtain code locations. However, existing agent-based methods face two critical limitations: (a) they primarily navigate codebases through directory traversal rather than understanding semantic relationships, (b) and they struggle to extract and reason about complex cross-file dependencies when these relationships aren't explicitly represented in the repository structure. This significantly impairs their ability to locate code that requires modification when the issue involves interactions between structurally distant components in the codebase.
|
| 53 |
+
|
| 54 |
+
# 2.3 Graph-based Code Representation Methods
|
| 55 |
+
|
| 56 |
+
Due to the inherent structure of code, several works have employed graph-based representations to improve code understanding by capturing key relationships between components. Aider (2023) constructs a RepoMap and uses a graph ranking algorithm to identify the most significant contextual elements. Similarly, as a plugin, RepoGraph (Ouyang et al., 2025) performs subgraph retrieval – extracting an ego-network of relevant lines and their neighbors – to provide structured context. CodexGraph (Liu et al., 2024) indexes the repository into a Neo4j graph database, where LLM agents query the database precisely using Cypher. The efficiency of its retrieval process depends heavily on the querying capabilities of the LLM. These methods focus primarily on providing relevant context but do not enhance the traversal process itself, as they do not explicitly model directory structure or file hierarchies.
|
| 57 |
+
|
| 58 |
+
In contrast, RepoUnderstander (Ma et al., 2024) builds hierarchical and function-call graphs, using Monte Carlo Tree Search (MCTS) guided by an LLM for exploration. While thorough, MCTS introduces extra computational overhead, making it less efficient than simpler traversal methods like BFS, particularly in large repositories. OrcaLoca (Yu et al., 2025) uses a simplified graph
|
| 59 |
+
|
| 60 |
+

|
| 61 |
+
Figure 2: Overview of LOCAGENT framework. LOCAGENT first parses the given codebase to build a graph-based code representation with various types of entities and relations. It then constructs sparse indexes for exploring structures and searching content. Using these indexes, it performs agent-guided searches that combine the graph and tools.
|
| 62 |
+
|
| 63 |
+
<table><tr><td rowspan="2">Method</td><td colspan="4">Relation Types</td><td colspan="4">Node Types</td><td rowspan="2">Search/Traversal Strategy</td></tr><tr><td>Contain</td><td>Import</td><td>Inherit</td><td>Invoke</td><td>Directory</td><td>File</td><td>Class</td><td>Function</td></tr><tr><td>CodexGraph(Liu et al., 2024)</td><td>✓</td><td>X</td><td>✓</td><td>✓</td><td>X</td><td>X</td><td>✓</td><td>✓</td><td>Cypher queries</td></tr><tr><td>RepoGraph(Ouyang et al., 2025)</td><td>✓</td><td>X</td><td>✓</td><td>✓</td><td>X</td><td>X</td><td>✓</td><td>✓</td><td>Ego-graph retrieval</td></tr><tr><td>RepoUnderstander(Ma et al., 2024)</td><td>✓</td><td>X</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>MCTS</td></tr><tr><td>OrcaLoca(Yu et al., 2025)</td><td>✓</td><td>X</td><td>X</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>Simple search tools</td></tr><tr><td>LOCAGENT(Ours)</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>Unified retrieval tools</td></tr></table>
|
| 64 |
+
|
| 65 |
+
Table 1: Comparison of Graph-Based Code Representation Methods.
|
| 66 |
+
|
| 67 |
+
enhanced by priority scheduling and context pruning. It maintains efficient search but may miss complex invocation dependencies. Table 1 summarizes the differences between these methods and LOCAGENT. Compared to these approaches, LOCAGENT offers a more comprehensive and unified representation of the repository, along with efficient, unified retrieval tools specifically designed for LLM consumption.
|
| 68 |
+
|
| 69 |
+
# 3 The LOCAGENT Framework
|
| 70 |
+
|
| 71 |
+
We introduce LOCAGENT, a graph-oriented LLM-agent framework for code localization. Figure 2 illustrates the overall framework. When given a repository, LOCAGENT can locate all the relevant code sections at various granularities (file, class, function, or line level) for different types of GitHub issues (such as bug reports, feature requests, performance bottlenecks, and security vulnerabilities) through automated in-depth exploration and analysis of the codebase. Section 3.1 proposes a novel graph-based indexing approach as an intermediate
|
| 72 |
+
|
| 73 |
+
representation for codebases. Section 3.2 presents our agent-based code search on the indexes and Section 3.3 describes our model fine-tuning and distillation process.
|
| 74 |
+
|
| 75 |
+
# 3.1 Graph-based Code Representation
|
| 76 |
+
|
| 77 |
+
Codebases contain rich structural information, both explicit and implicit, that is essential for agent reasoning. Building on this insight, we develop a graph-based indexing that comprehensively captures codebase relationships while maintaining a granularity suitable for LLM-agents to retrieve.
|
| 78 |
+
|
| 79 |
+
Code Graph Construction. We construct a heterogeneous directed graph $\mathcal{G}(\mathcal{V},\mathcal{E},\mathcal{A},\mathcal{R})$ to index the codebase, where $\mathcal{V} = \{v_{i}\}_{i = 1}^{n}$ is the node set and $\mathcal{E}\subseteq \mathcal{V}\times \mathcal{V}$ is the edge set. Each node $v\in \mathcal{V}$ and edge $e\in \mathcal{E}$ has an associated type mapping function. For nodes, $\tau (v):\mathcal{V}\to \mathcal{A}$ maps to types $\mathcal{A} = \{\mathrm{directory},\mathrm{file},\mathrm{class},\mathrm{function}\}$. For edges, $\phi (e):\mathcal{E}\rightarrow \mathcal{R}$ maps to relationships $\mathcal{R} = \{\mathrm{contain},\mathrm{import},\mathrm{invoke},\mathrm{inherit}\}$. In this paper, we focus our study on Python repositories and leave codebases with other programming languages as future work.
|
| 80 |
+
|
| 81 |
+
|
|
| 82 |
+
|
| 83 |
+
First, we include all directories and Python files as nodes. Then, we parse each Python file using the abstract syntax tree (AST) to identify inner functions and classes recursively as nodes. We set the function level as the smallest node granularity and use each function's code content as the document for agent retrieval. This approach creates a good balance of information density between the index and documents, allowing LLMs to reason effectively within their context window limitations.
|
| 84 |
+
|
| 85 |
+
As shown in Figure 2, all nodes with different types can be connected as a single tree using the contain relationship. This structure supports standard codebase-navigation operations from existing works. Our code graph further incorporates more advanced codebase relationships as edges: (1) the invoke relationship from function/class to function/class, where an invoke to a class represents class instantiation; (2) the import relationship from file to function/class; and (3) the inherit relationship between classes.
|
| 86 |
+
|
| 87 |
+
Sparse Hierarchical Entity Indexing. We treat nodes in our code graph as entities and build hierarchical indexing based on their contents. For each keyword, we look up the indexes from top to bottom: (1) We build an entity ID index as a unique identifier for each node using its fully qualified name. For example, a function calculate_sum in the MathUtils class located in src/utils.py would be represented as: src/utils.py:MathUtils.calculate_sum. (2) We construct a global dictionary to map the entity name (e.g., calculate_sum) to all nodes that share the same name. (3) We index entity IDs through an inverted index (i.e., BM25) to handle keyword searches that don't exactly match the IDs or names of entities. (4) For cases where input keywords aren't part of the entities' IDs (e.g., when a keyword refers to a global variable), we build an inverted index that maps code chunk(s) to each entity to cover all possible matches.
|
| 88 |
+
|
| 89 |
+
Remark. Rather than relying solely on directory structures or hierarchical module indexing, our approach captures module dependencies that transcend directory boundaries. Two modules in distant directories (A and B) may appear unrelated in traditional navigation, but if they invoke each other or share inheritance, they're syntactically close in our graph representation. This syntactic
|
| 90 |
+
|
| 91 |
+
<table><tr><td>Tool Name</td><td>Input Params</td><td>Output</td></tr><tr><td>SearchEntity</td><td>Keywords</td><td>Related Entities with Code Snippets</td></tr><tr><td>TraverseGraph</td><td>Start Entity IDs Direction Traverse Hops Entity Types Relation Types</td><td>Traversed Subgraph, including Entities and Relations</td></tr><tr><td>RetrieveEntity</td><td>Entity IDs</td><td>Complete Code of Specified Entities</td></tr></table>
|
| 92 |
+
|
| 93 |
+
Table 2: List of unified APIs provided by LocAgent for code search and exploration.
|
| 94 |
+
|
| 95 |
+
proximity is essential for code localization because issues typically manifest through call relationships rather than directory structure. By capturing these functional dependencies, our approach efficiently identifies related components even when physically distant in the codebase.
|
| 96 |
+
|
| 97 |
+
# 3.2 Agent-guided Code Search
|
| 98 |
+
|
| 99 |
+
We develop tools based on the indexes built offline. During runtime, LOCAGENT takes issue statements as input and launches agents that autonomously use tools to localize target code sections. While the agent may iteratively invoke multiple tools internally to explore the codebase, LOCAGENT presents a simplified interface to users, requiring only a single-turn interaction—users submit an issue statement and receive localization results without additional input. This autonomous, self-contained workflow makes LOCAGENT both easy to deploy and highly practical for real-world use.
|
| 100 |
+
|
| 101 |
+
Tool Design for Codebase Exploration. Recent works (Örwall, 2024; Wang et al., 2025), inspired by GUI-based IDEs, have developed numerous specialized tools for agents to explore codebases. However, these tools are initially designed for human readability, which sacrifices the compactness and efficiency that LLM agents prefer (Yang et al., 2024). Building upon our graph-based code representation, we can develop tools that support efficient higher-order codebase exploration to address these challenges. We unify all codebase navigation, search, and view operations into three tools (Table 2), introduced as follows.
|
| 102 |
+
|
| 103 |
+
SearchEntity: This tool searches codebases using keywords to locate relevant entities through our Hierarchical Entity Index. When an exact match isn't found in the upper index, the system performs a fuzzy search using the lower index. For each entity found, we return its code snippet in three detail
|
| 104 |
+
|
| 105 |
+
levels: fold, preview, and full code (Figure 6). This effectively prevents lengthy code context and reduces noise fed into agents.
|
| 106 |
+
|
| 107 |
+
**TraverseGraph:** This tool performs a type-aware breadth-first search (BFS) on the code graph, starting from input entities and allowing control over both traversal direction and number of hops. This supports agents to perform arbitrary multi-hop codebase navigation through only one action, significantly improving the efficiency compared with existing agent systems. Note that by allowing agents to select entity types and relation types for each traversal, this tool effectively leverages the LLM agents' coding expertise to generate proper meta paths—a crucial element for heterogeneous graph analysis (Lv et al., 2021). For example, by specifying entity types to {class, function} and relation types to {contain, inherit}, this tool returns the UML diagram. Additionally, we design an expanded tree-based format for the output subgraph that encodes both relation types and directions (Figure 7). (Fatemi et al., 2023) demonstrates that LLM performance on graph reasoning depends on the input graph format. Converting a graph into a tree structure encodes topology through the spatial distance between entity names, thereby deriving better performance. For detailed comparisons with alternative graph formats, please see Appendix A.1.2.
|
| 108 |
+
|
| 109 |
+
RetrieveEntity: This tool retrieves complete entity attributes for each input entity ID, including essential information such as file path, line number, and code content.
|
| 110 |
+
|
| 111 |
+
Chain-of-Thought Agent Planning. We use chain-of-thought (CoT) prompting (shown in Appendix D) to guide the agent in solving code localization problems step by step. The agent systematically follows these steps: (1) Keyword extraction. The agent begins by breaking down the issue statement into different categories and then extracts relevant keywords that are closely related to the problem. (2) Linking keywords to code entities. The agent invokes SearchEntity to complete and clarify each extracted keyword.
|
| 112 |
+
|
| 113 |
+
(3) Generate the logical flow from fault to failure. The agent first identifies the entry points that trigger the problem. Then, it iteratively traverses the codebase with TraverseGraph, retrieves code contents with RetrieveEntity, and searches new keywords with SearchEntity. Finally, it generates the logic flow based on the issue and additional context. (4) Locate the target entities. The agent pinpoints all suspicious code entities that need modification
|
| 114 |
+
|
| 115 |
+
based on the logic flow. Then, it ranks these entities based on their relevance.
|
| 116 |
+
|
| 117 |
+
Confidence Estimation Based on Consistency. After generating a complete ranked list of candidate entities, to obtain a more consistent ranking, we measure the consistency (Wang et al., 2023a) of the LLM's predictions across multiple iterations. Specifically, we use the Reciprocal Rank as the initial confidence score for each predicted location. We then aggregate the scores for each entity across iterations to compute its final confidence score. The intuition behind this approach is that if the LLM consistently ranks a location higher in multiple iterations, it is more likely to be relevant.
|
| 118 |
+
|
| 119 |
+
# 3.3 Open-source Model Fine-tuning
|
| 120 |
+
|
| 121 |
+
Given the high costs of proprietary LLM APIs and data security concerns, we fine-tuned open-source models to improve their code localization capabilities and enable local deployment. We collect 433 successful trajectories generated with Claude-3.5, where the agent completed tasks from the SWEBench training set. Due to budget constraints, we sample an additional 335 trajectories generated by the initially fine-tuned Qwen2.5-32B model. Importantly, we only select successful trajectories where the model correctly localized the issues, creating a high-quality dataset of correct reasoning paths. These successful examples are then used to refine the same 32B model further, reinforcing effective reasoning patterns through this self-improvement loop. The entire dataset, combining both Claude-3.5 trajectories and successful Qwen2.5-32B samples, was then used to distill knowledge to a smaller 7B model.
|
| 122 |
+
|
| 123 |
+
To fine-tune the smaller model, we employ Supervised Fine-Tuning (SFT) with LoRA (Hu et al., 2021). Our experiments show that this straightforward distillation method significantly enhances the performance of smaller models. See Appendix C.1.3 for more training details.
|
| 124 |
+
|
| 125 |
+
# 4 LOC-BENCH: A New Benchmark for Code Localization
|
| 126 |
+
|
| 127 |
+
# 4.1 Revisiting Existing Benchmark
|
| 128 |
+
|
| 129 |
+
SWE-Bench (Jimenez et al., 2023) is a widely used benchmark that collects GitHub issues and corresponding code patches that resolve them. Xia et al. (2024); Suresh et al. (2024) adapt its subset, SWE-Bench-Lite, for code localization, treating the patched files and functions as the targets.
|
| 130 |
+
|
| 131 |
+
However, existing datasets, including SWE-Bench, present challenges for effectively evaluating code localization methods. First, they are at risk of contamination, as they may include data overlapping with the repositories or issues used by modern models during pre-training. Second, existing datasets are not specifically designed for code localization (Tomassi et al., 2019). SWE-Bench, for instance, was created primarily to evaluate end-to-end bug-fixing capabilities, with localization being only an implicit intermediate step. This focus results in datasets dominated by bug reports (85% of SWE-Bench-Lite examples) while severely underrepresenting other common software maintenance tasks such as feature requests (14%), security vulnerabilities (1%), and performance optimizations (0%). This imbalance fails to capture the diverse localization challenges faced in real-world software development.
|
| 132 |
+
|
| 133 |
+
# 4.2 Dataset Construction
|
| 134 |
+
|
| 135 |
+
To address the limitations of existing benchmarks, we introduce LOC-BENCH, a new dataset specifically designed for code localization. This dataset collects up-to-date issues from Python repositories to mitigate the influence of pre-training bias in the latest LLMs. Additionally, LOC-BENCH covers wider categories, including bug reports, feature requests, security, and performance issues, enabling a more comprehensive evaluation of code localization methods. The statistics of LOC-BENCH are shown in Table 3.
|
| 136 |
+
|
| 137 |
+
For the Bug Report category, we collect GitHub issues created after October 2024, which is later than the release dates of most modern LLMs. To enrich the dataset with more instances of security and performance issues, we use the GitHub Search API to search for relevant keywords, such as "latency improvement" for performance-related issues. We exclude instances that involve modifying more than five Python files or more than ten functions in the corresponding patch. For further details, see Appendix B.1.
|
| 138 |
+
|
| 139 |
+
# 5 Experiments
|
| 140 |
+
|
| 141 |
+
Our experiments aim to evaluate four key aspects of LOCAGENT: (1) the effectiveness of our graph-based representation and tooling for code localization compared to existing methods, (2) the performance of fine-tuned open-source models as cost-effective alternatives to proprietary LLMs, (3) a detailed analysis of how performance varies across
|
| 142 |
+
|
| 143 |
+
<table><tr><td>Dataset</td><td>Category</td><td>#Sample</td></tr><tr><td rowspan="4">SWE-Bench-Lite (Total = 300)</td><td>Bug Report</td><td>254</td></tr><tr><td>Feature Request</td><td>43</td></tr><tr><td>Security Issue</td><td>3</td></tr><tr><td>Performance Issue</td><td>0</td></tr><tr><td rowspan="4">Loc-Bench (Total = 560)</td><td>Bug Report</td><td>242</td></tr><tr><td>Feature Request</td><td>150</td></tr><tr><td>Security Issue</td><td>29</td></tr><tr><td>Performance Issue</td><td>139</td></tr></table>
|
| 144 |
+
|
| 145 |
+
Table 3: Distribution of samples across different categories in the SWE-Bench-Lite and Loc-Bench datasets.
|
| 146 |
+
|
| 147 |
+
task categories, and (4) the contribution of each component in our framework through comprehensive ablation studies. We evaluate on both SWE-Bench-Lite and our introduced Loc-Bench dataset. Additionally, we examine the impact of improved localization on downstream software maintenance tasks.
|
| 148 |
+
|
| 149 |
+
# 5.1 Experimental Settings
|
| 150 |
+
|
| 151 |
+
Datasets. We first conduct experiments on SWE-Bench-Lite, treating the patched files and functions as the targets for localization. Following Suresh et al. (2024), we excluded examples where no existing functions were modified by the patch, ultimately retaining 274 out of the original 300 examples.
|
| 152 |
+
|
| 153 |
+
Metrics. To assess performance, we use Acc@k (Accuracy at k), a modified accuracy metric inspired by R-Precision from information retrieval, following Agentless (Xia et al., 2024). For each example, we select the top-k predicted locations and consider a localization attempt successful only if all relevant locations are correctly identified within these top-k predictions. This approach measures the ability to fully identify all necessary code sections that require modification. We report results across multiple $k$ values: file localization at Acc@1, Acc@3, and Acc@5, and function localization at Acc@5 and Acc@10. Additionally, to provide a more relaxed evaluation criterion, we assess module localization, which only requires finding any function within the patched class.
|
| 154 |
+
|
| 155 |
+
# 5.2 Baselines
|
| 156 |
+
|
| 157 |
+
We evaluate LOCAGENT against three categories of competitive baselines: (a) Retrieval-based meth
|
| 158 |
+
|
| 159 |
+
<table><tr><td rowspan="2">Type</td><td rowspan="2">Method</td><td rowspan="2">Loc-Model</td><td colspan="3">File (%)</td><td colspan="2">Module (%)</td><td colspan="2">Function (%)</td></tr><tr><td>Acc@1</td><td>Acc@3</td><td>Acc@5</td><td>Acc@5</td><td>Acc@10</td><td>Acc@5</td><td>Acc@10</td></tr><tr><td rowspan="5">Embedding-Based</td><td colspan="2">BM25 (Robertson et al., 1994)</td><td>38.69</td><td>51.82</td><td>61.68</td><td>45.26</td><td>52.92</td><td>31.75</td><td>36.86</td></tr><tr><td colspan="2">E5-base-v2 (Wang et al., 2022)</td><td>49.64</td><td>74.45</td><td>80.29</td><td>67.88</td><td>72.26</td><td>39.42</td><td>51.09</td></tr><tr><td colspan="2">Jina-Code-v2 (Günther et al., 2023)</td><td>43.43</td><td>71.17</td><td>80.29</td><td>63.50</td><td>72.63</td><td>42.34</td><td>52.19</td></tr><tr><td colspan="2">Codesage-large-v2 (Zhang et al., 2024)</td><td>47.81</td><td>69.34</td><td>78.10</td><td>60.58</td><td>69.71</td><td>33.94</td><td>44.53</td></tr><tr><td colspan="2">CodeRankEmbed (Suresh et al., 2024)</td><td>52.55</td><td>77.74</td><td>84.67</td><td>71.90</td><td>78.83</td><td>51.82</td><td>58.76</td></tr><tr><td rowspan="2">Procedure-Based</td><td rowspan="2">Agentless (Xia et al., 2024)</td><td>GPT-4o</td><td>67.15</td><td>74.45</td><td>74.45</td><td>67.15</td><td>67.15</td><td>55.47</td><td>55.47</td></tr><tr><td>Claude-3.5</td><td>72.63</td><td>79.20</td><td>79.56</td><td>68.98</td><td>68.98</td><td>58.76</td><td>58.76</td></tr><tr><td rowspan="9">Agent-Based</td><td rowspan="2">MoatlessTools (Örwall, 2024)</td><td>GPT-4o</td><td>73.36</td><td>84.31</td><td>85.04</td><td>74.82</td><td>76.28</td><td>57.30</td><td>59.49</td></tr><tr><td>Claude-3.5</td><td>72.63</td><td>85.77</td><td>86.13</td><td>76.28</td><td>76.28</td><td>64.60</td><td>64.96</td></tr><tr><td rowspan="2">SWE-agent (Yang et al., 2024)</td><td>GPT-4o</td><td>57.30</td><td>64.96</td><td>68.98</td><td>58.03</td><td>58.03</td><td>45.99</td><td>46.35</td></tr><tr><td>Claude-3.5</td><td>77.37</td><td>87.23</td><td>90.15</td><td>77.74</td><td>78.10</td><td>64.23</td><td>64.60</td></tr><tr><td rowspan="2">Openhands (Wang et al., 2025)</td><td>GPT-4o</td><td>60.95</td><td>71.90</td><td>73.72</td><td>62.41</td><td>63.87</td><td>49.64</td><td>50.36</td></tr><tr><td>Claude-3.5</td><td>76.28</td><td>89.78</td><td>90.15</td><td>83.21</td><td>83.58</td><td>68.25</td><td>70.07</td></tr><tr><td rowspan="3">LOCAGENT (Ours)</td><td>Qwen2.5-7B(ft)</td><td>70.80</td><td>84.67</td><td>88.32</td><td>81.02</td><td>82.85</td><td>64.23</td><td>71.53</td></tr><tr><td>Qwen2.5-32B(ft)</td><td>75.91</td><td>90.51</td><td>92.70</td><td>85.77</td><td>87.23</td><td>71.90</td><td>77.01</td></tr><tr><td>Claude-3.5</td><td>77.74</td><td>91.97</td><td>94.16</td><td>86.50</td><td>87.59</td><td>73.36</td><td>77.37</td></tr></table>
|
| 160 |
+
|
| 161 |
+
Table 4: Performance comparison with baseline methods on code localization on SWE-bench lite. Results show the accuracy at file, module, and function levels. For Agent-Based methods, we use GPT-4o-2024-0513 (abbr. as GPT-4o) and Claude-3-5-sonnet-20241022 (abbr. as Claude-3.5) as the localization model. Additionally, the performance of our fine-tuned open-source models, Qwen2.5-7B(ft) and Qwen2.5-32B(ft), are included for comparison.
|
| 162 |
+
|
| 163 |
+
ods: We include the sparse retrieval approach BM25 (Robertson et al., 1994) and several state-of-the-art embedding models, including the general-purpose E5-base-v2 (Wang et al., 2022) and specialized code embedding models such as JinaCode-v2 (Günther et al., 2023), Codesage-large-v2 (Zhang et al., 2024), and the current SOTA code embedding model CodeRankEmbed (Suresh et al., 2024). Proprietary embedding solutions were excluded due to API costs. (b) Procedure-based methods: We compare against Agentless (Xia et al., 2024), which employs a structured hierarchical approach to code localization without complex agent architectures. (c) Agent-based methods: We include several advanced agent frameworks designed for code exploration and modification, specifically OpenHands (Wang et al., 2025) (using its default CodeActAgent implementation), SWE-Agent (Yang et al., 2024), and MoatlessTools (Örwall, 2024). For implementation details, please refer to Appendix C.1.1.
|
| 164 |
+
|
| 165 |
+
# 5.3 Evaluation Results on SWE-Bench-Lite
|
| 166 |
+
|
| 167 |
+
As shown in Table 4, Agent-Based methods consistently outperform other approaches, and our method demonstrates competitive performance by achieving the best results across all levels of code localization. Unlike traditional retrieval-based methods, Agentless identifies only a limited number of locations due to its narrow repository scope, which hinders performance gains when considering a broader set of candidates. The results of the NDCG are presented in Table 11 in the Appendix.
|
| 168 |
+
|
| 169 |
+

|
| 170 |
+
|
| 171 |
+

|
| 172 |
+
Figure 3: Performance analysis at different difficulty levels for file- and function-level localization. All agent-based methods and Agentless use Claude-3.5 as the localization model. Hop $N$ refers to the distances between functions mentioned in the issue description and the ground truth patch on our code graph.
|
| 173 |
+
|
| 174 |
+
To further analyze the results, we examine performance across different task difficulty levels. We measure the task difficulty by calculating the shortest hops between the functions mentioned in the issue descriptions and the patched functions on our code graph (See Appendix C.1.2 for more details). As shown in Figure 3, performance decreases for all methods as the task becomes more challenging. However, Agent-based methods demonstrate better robustness as the difficulty increases, with
|
| 175 |
+
|
| 176 |
+

|
| 177 |
+
Figure 4: Comparison of performance between the original and fine-tuned Qwen models. The metrics used are file-level Acc@5 and module/function-level Acc@10. Dashed lines represent the performance of the Claude-3.5 model for reference.
|
| 178 |
+
|
| 179 |
+
our method maintaining competitive performance across various difficulty levels. Retrieval-based methods, such as E5-Base-v2 and CodeRankEmbed, perform poorly at the function level, even when the patched functions are explicitly mentioned in the query. This is because they treat the query as a whole, failing to capture fine-grained details. Agentless performs even worse than retrieval-based methods when exploration beyond the query is needed ( $hop \geq 1$ ) due to its simplistic localization process and limited view focused only on the repository structure.
|
| 180 |
+
|
| 181 |
+
# 5.4 Fine-tuned Open-source Models
|
| 182 |
+
|
| 183 |
+
Figure 4 demonstrates that after fine-tuning, both the 7B and 32B models show significant improvements on this task. LOCAGENT with finetuned Qwen-2.5-Coder-Instruct-32B (abbreviated as Qwen2.5-32B(ft)) achieves performance comparable to Claude-3.5, and LOCAGENT with Qwen2.5-7B(ft) also delivers results on par with that obtained using GPT-4o. As shown in Table 4, our method with Qwen2.5-32B(ft) outperforms nearly all baselines, including those that use larger and more powerful LLMs. The original 7B model performs poorly due to its limited tool-use capability (Chen et al., 2024). These results validate the feasibility of deploying our fine-tuned open-source models as promising alternatives to proprietary APIs, especially in resource-constrained applications.
|
| 184 |
+
|
| 185 |
+
# 5.5 Efficiency Analysis
|
| 186 |
+
|
| 187 |
+
Table 5 presents an efficiency analysis comparing agent-based methods in terms of cost and the number of agent interactions required. MoatlessTools demonstrates good cost-efficiency and requires relatively fewer rounds of interaction. However, the
|
| 188 |
+
|
| 189 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">LM</td><td rowspan="2">#Round</td><td rowspan="2">Cost($)</td><td>Acc@10</td></tr><tr><td>Cost</td></tr><tr><td rowspan="2">MoatlessTools</td><td>GPT-4o</td><td>5</td><td>0.46</td><td>1.3</td></tr><tr><td>Claude-3.5</td><td>5</td><td>0.46</td><td>1.4</td></tr><tr><td rowspan="2">SWE-agent</td><td>GPT-4o</td><td>8</td><td>0.56</td><td>0.8</td></tr><tr><td>Claude-3.5</td><td>9</td><td>0.67</td><td>1.0</td></tr><tr><td rowspan="2">Openhands</td><td>GPT-4o</td><td>15</td><td>0.83</td><td>0.6</td></tr><tr><td>Claude-3.5</td><td>13</td><td>0.79</td><td>0.9</td></tr><tr><td rowspan="3">Ours</td><td>Claude-3.5</td><td>7</td><td>0.66</td><td>1.2</td></tr><tr><td>Qwen2.5-7B(ft)</td><td>6</td><td>0.05</td><td>13.2</td></tr><tr><td>Qwen2.5-32B(ft)</td><td>9</td><td>0.09</td><td>8.6</td></tr></table>
|
| 190 |
+
|
| 191 |
+
Table 5: Efficiency analysis comparing the average cost and number of agent interaction rounds required by different methods. The cost-efficiency of each method is evaluated using the ratio of function-level Acc@10 to average cost.
|
| 192 |
+
|
| 193 |
+
<table><tr><td>Model Setting</td><td>File Acc@5</td><td>Module Acc@10</td><td>Function Acc@10</td></tr><tr><td>Ours</td><td>88.32</td><td>82.85</td><td>71.53</td></tr><tr><td>w/o TraverseGraph</td><td>86.13</td><td>78.47</td><td>66.06</td></tr><tr><td>Relation Types: contain</td><td>86.50</td><td>79.56</td><td>66.42</td></tr><tr><td>Traverse Hops: 1</td><td>86.86</td><td>80.29</td><td>66.79</td></tr><tr><td>w/o RetrieveEntity</td><td>87.59</td><td>81.39</td><td>69.34</td></tr><tr><td>w/o SearchEntity</td><td>68.98</td><td>61.31</td><td>53.28</td></tr><tr><td>w/o BM25 index</td><td>75.18</td><td>68.98</td><td>60.22</td></tr></table>
|
| 194 |
+
|
| 195 |
+
Table 6: The ablation study of our model. The metrics used here are file-level Acc@5, module-level Acc@10, and function-level Acc@10. The impact of removing or fixing components is analyzed to observe how each component contributes to the overall accuracy.
|
| 196 |
+
|
| 197 |
+
dense embeddings it uses make it difficult and slow to adapt to fast-evolving codebases. SWE-agent and Openhands also show moderate costs but still do not match the efficiency of LOCAGENT. For LOCAGENT with Claude-3.5, although more rounds of interaction are required, the cost remains lower than that of Openhands, illustrating the token efficiency of our tool's outputs. LOCAGENT with fine-tuned Qwen models stands out for its superior efficiency<sup>1</sup>. Qwen2.5-7B(ft) is the most cost-efficient option, requiring only $0.05 per example, while Qwen2.5-32B(ft) offers a more cost-effective alternative to Claude-3.5. These results highlight the potential of fine-tuned open-source models as efficient alternatives, providing an optimal balance of cost-effectiveness and performance that surpasses other methods.
|
| 198 |
+
|
| 199 |
+
# 5.6 Ablation Study
|
| 200 |
+
|
| 201 |
+
We conduct an ablation study to evaluate the effectiveness of each component of our toolsets. Due to budget constraints, we use the fine-tuned Qwen-2.5-7B as the localization model for these experiments.
|
| 202 |
+
|
| 203 |
+
(1) Each tool in our toolset plays a critical role in code localization performance. As shown in Table 6, removing any tool, especially the SearchEntity tool, leads to varying degrees of accuracy degradation, particularly in module and function level localization. This highlights the critical role each tool plays in identifying relevant modules and functions.
|
| 204 |
+
(2) The graph structure provides essential information for accurate code localization. Removing TraverseGraph tool decreases module and function level performance since the agent cannot obtain any structure information about the codebase and relies on reasoning capability to identify call relationship or directory structure. Adding contain relationship provides only marginal improvements compared to fully removing TraverseGraph, emphasizing the importance of the other three relationship types and explaining why our method surpasses others relying only on the repository structure.
|
| 205 |
+
(3) Multi-hop exploration is crucial for deep code understanding. When compared to the full setting, fixing $Hops = 1$ leads to a moderate decline in file and module-level accuracy, but it causes a more significant decrease in function-level accuracy, underscoring the importance of multi-hop exploration for identifying relevant entities.
|
| 206 |
+
(4) Sparse indexing significantly enhances localization performance. Removing SearchEntity tool, or even partial removal of its index, causes a substantial drop in performance across all metrics. This demonstrates the effectiveness of building a sparse index on our code graph for improving localization performance.
|
| 207 |
+
|
| 208 |
+
# 5.7 Evaluation Results on Loc-Bench
|
| 209 |
+
|
| 210 |
+
To ensure the robustness and generalization of our methods and fine-tuned Qwen models, and to eliminate potential data leakage, we evaluate our new dataset. Since Loc-Bench includes examples that edit 1 to 5 files, we assess file localization at top-5 and top-10 ranks, and function/module localization at top-10 and top-15 ranks. Table 7 shows that our fine-tuned Qwen2.5-7B model exhibits strong gen
|
| 211 |
+
|
| 212 |
+

|
| 213 |
+
|
| 214 |
+

|
| 215 |
+
Figure 5: Performance analysis across different difficulty categories for file- and function-level localization. All agent-based baselines and Agentless use Claude-3.5 as the localization model.
|
| 216 |
+
|
| 217 |
+
eralization capabilities, maintaining competitive performance compared to SWE-agent, which uses a more expensive and stronger model. These results highlight the practicality of the fine-tuned Qwen2.5-7B model for real-world applications. Despite being an open-source alternative, it achieves a performance comparable to Claude-3.5, supporting its feasibility as a cost-effective substitute for commercial models in practical scenarios.
|
| 218 |
+
|
| 219 |
+
Additionally, we evaluate the performance across four different difficulty categories. Figure 5 clearly shows that our method outperforms other methods in almost all categories of code localization. However, it also highlights a noticeable decrease in performance across the other three categories compared to the Bug Report category. This performance gap likely reflects our training data distribution, which contained more bug report examples, potentially leading to scaffolds better optimized for bug localization tasks. This trend suggests that while our method is highly effective for bug report localization, there is still room for improvement in handling the other categories through more balanced training data and category-specific optimization strategies.
|
| 220 |
+
|
| 221 |
+
# 5.8 Application: Better Localization Leads to More Solved GitHub Issues
|
| 222 |
+
|
| 223 |
+
To assess the impact of localization methods on downstream tasks, we evaluated their effectiveness in solving GitHub issues. We choose Agentless as the baseline, ranking among the top-performing
|
| 224 |
+
|
| 225 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Loc Model</td><td colspan="2">File (%)</td><td colspan="2">Module (%)</td><td colspan="2">Function (%)</td></tr><tr><td>Acc@5</td><td>Acc@10</td><td>Acc@10</td><td>Acc@15</td><td>Acc@10</td><td>Acc@15</td></tr><tr><td>IR-Based</td><td>CodeRankEmbed</td><td>74.29</td><td>80.89</td><td>63.21</td><td>67.50</td><td>43.39</td><td>46.61</td></tr><tr><td>Agentless</td><td>Claude-3.5</td><td>67.50</td><td>67.50</td><td>53.39</td><td>53.39</td><td>42.68</td><td>42.68</td></tr><tr><td>OpenHands</td><td>Claude-3.5</td><td>79.82</td><td>80.00</td><td>68.93</td><td>69.11</td><td>59.11</td><td>59.29</td></tr><tr><td>SWE-agent</td><td>Claude-3.5</td><td>77.68</td><td>77.68</td><td>63.57</td><td>63.75</td><td>51.96</td><td>51.96</td></tr><tr><td rowspan="2">LocAgent (Ours)</td><td>Qwen2.5-7B(ft)</td><td>78.57</td><td>79.64</td><td>63.04</td><td>63.04</td><td>51.43</td><td>51.79</td></tr><tr><td>Claude-3.5</td><td>83.39</td><td>86.07</td><td>70.89</td><td>71.07</td><td>59.29</td><td>60.71</td></tr></table>
|
| 226 |
+
|
| 227 |
+
Table 7: Performance evaluation on the real-world LocBench dataset.
|
| 228 |
+
|
| 229 |
+
<table><tr><td>Method</td><td>Localization LM</td><td>Acc@5</td><td>Pass@1</td><td>Pass@10</td></tr><tr><td>Agentless</td><td>Claude-3.5</td><td>58.39</td><td>26.31</td><td>33.58</td></tr><tr><td rowspan="2">Ours</td><td>Qwen2.5-32B(ft)</td><td>69.34</td><td>26.79</td><td>36.13</td></tr><tr><td>Claude-3.5</td><td>73.36</td><td>27.92</td><td>37.59</td></tr></table>
|
| 230 |
+
|
| 231 |
+
Table 8: Impact of localization accuracy on downstream bug repair tasks.
|
| 232 |
+
|
| 233 |
+
open-source submissions on SWE-Bench-Lite. For consistency, we utilized Claude-3.5 as the editing model in conjunction with the Agentless editing method. Table 8 shows that the success rate for solving GitHub issues improves significantly with better code localization accuracy.
|
| 234 |
+
|
| 235 |
+
# 6 Conclusion
|
| 236 |
+
|
| 237 |
+
In conclusion, LOCAGENT enhances code localization by structuring codebases as graphs, enabling efficient repository-level exploration for LLM agents. With fine-tuned open-source models, our method achieves high localization accuracy while significantly reducing costs compared to larger proprietary models. Experimental results demonstrate the effectiveness of LOCAGENT in identifying relevant code components and improving downstream tasks.
|
| 238 |
+
|
| 239 |
+
# Limitations
|
| 240 |
+
|
| 241 |
+
First, our study primarily focused on fine-tuning Qwen-2.5-Coder models. Exploring a broader range of base models, including other open-source LLMs like CodeLlama, Mistral, or Yi, could provide valuable insights into model selection trade-offs. Additionally, investigating different finetuning approaches beyond LoRA, such as full finetuning or other parameter-efficient methods, could potentially yield better performance.
|
| 242 |
+
|
| 243 |
+
Second, though we demonstrated improved bug repair performance with better localization, we only scratched the surface of potential downstream
|
| 244 |
+
|
| 245 |
+
applications. Future work should evaluate LocAgent's impact on other software engineering tasks like refactoring, feature addition, security vulnerability patching, and performance optimization. This would provide a more comprehensive understanding of the framework's practical utility.
|
| 246 |
+
|
| 247 |
+
Moreover, our fine-tuning process relied heavily on trajectories generated by Claude-3.5 and the fine-tuned Qwen2.5-32B model. A more diverse training dataset incorporating examples from different models, tasks, and repositories could improve the robustness and generalization of fine-tuned models. Additionally, analyzing the impact of different dataset compositions and filtering strategies on model performance could yield valuable insights.
|
| 248 |
+
|
| 249 |
+
Finally, the current evaluation focuses primarily on Python codebases. Extending LOCAGENT to support other programming languages and evaluating its performance across different language paradigms would better demonstrate its generalizability. Further, our evaluation metrics could be expanded to include more nuanced measures of localization quality beyond accuracy and NDCG.
|
| 250 |
+
|
| 251 |
+
# References
|
| 252 |
+
|
| 253 |
+
Aider. 2023. Building a better repository map with tree sitter. Accessed: April 15, 2025.
|
| 254 |
+
Anthropic. 2023. Claude: Conversational ai by anthropic. Accessed: January 21, 2025.
|
| 255 |
+
artificialanalysis.ai. 2025. Artificial analysis. https://artificialanalysis.ai/models/. Accessed: 2025-04-28.
|
| 256 |
+
Marcel Böhme, Ezekiel O Soremekun, Sudipta Chattopadhyay, Emamurho Ugherughe, and Andreas Zeller. 2017. Where is the bug and how is it fixed? an experiment with practitioners. In Proceedings of the 2017 11th joint meeting on foundations of software engineering, pages 117-128.
|
| 257 |
+
|
| 258 |
+
Zehui Chen, Weihua Du, Wenwei Zhang, Kuikun Liu, Jiangning Liu, Miao Zheng, Jingming Zhuo, Songyang Zhang, Dahua Lin, Kai Chen, et al. 2024. T-eval: Evaluating the tool utilization capability of large language models step by step. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9510-9529.
|
| 259 |
+
Cognition.ai. 2024. Introducing devin, the first ai software engineer.
|
| 260 |
+
John Ellson, Emden Gansner, Lefteris Koutsofios, Stephen C North, and Gordon Woodhull. 2002. Graphviz—open source graph drawing tools. In Graph Drawing: 9th International Symposium, GD 2001 Vienna, Austria, September 23–26, 2001 Revised Papers 9, pages 483–484. Springer.
|
| 261 |
+
Bahare Fatemi, Jonathan Halcrow, and Bryan Perozzi. 2023. Talk like a graph: Encoding graphs for large language models. arXiv preprint arXiv:2310.04560.
|
| 262 |
+
Paul Gauthier. 2024. How aider scored sota $26.3\%$ on swe bench lite | aider. Accessed: January 21, 2025.
|
| 263 |
+
Jiafeng Guo, Yixing Fan, Qingyao Ai, and W Bruce Croft. 2016. A deep relevance matching model for ad-hoc retrieval. In Proceedings of the 25th ACM international on conference on information and knowledge management, pages 55-64.
|
| 264 |
+
Jiafeng Guo, Yixing Fan, Liang Pang, Liu Yang, Qingyao Ai, Hamed Zamani, Chen Wu, W Bruce Croft, and Xueqi Cheng. 2020. A deep look into neural ranking models for information retrieval. Information Processing & Management, 57(6):102067.
|
| 265 |
+
Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning, pages 3929-3938. PMLR.
|
| 266 |
+
Michael Günther, Louis Milliken, Jonathan Geuter, Georgios Mastrapas, Bo Wang, and Han Xiao. 2023. Jina embeddings: A novel set of high-performance sentence embedding models. Preprint, arXiv:2307.11224.
|
| 267 |
+
Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685.
|
| 268 |
+
Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, Kai Dang, Yang Fan, Yichang Zhang, An Yang, Rui Men, Fei Huang, Bo Zheng, Yibo Miao, Shanghaoran Quan, Yunlong Feng, Xingzhang Ren, Xuancheng Ren, Jingren Zhou, and Junyang Lin. 2024. Qwen2.5-coder technical report. Preprint, arXiv:2409.12186.
|
| 269 |
+
Hyperbolic. 2025. Hyperbolic website. https:// hyperbolic.xyz/. Accessed: 2025-04-15.
|
| 270 |
+
|
| 271 |
+
Carlos E Jimenez, John Yang, Alexander Wettig, Shunyu Yao, Kexin Pei, Ofir Press, and Karthik Narasimhan. 2023. Swe-bench: Can language models resolve real-world github issues? arXiv preprint arXiv:2310.06770.
|
| 272 |
+
Sungmin Kang, Gabin An, and Shin Yoo. 2023. A preliminary evaluation of llm-based fault localization. arXiv preprint arXiv:2308.05487.
|
| 273 |
+
Sungmin Kang, Gabin An, and Shin Yoo. 2024. A quantitative and qualitative evaluation of llm-based explainable fault localization. Proceedings of the ACM on Software Engineering, 1(FSE):1424-1446.
|
| 274 |
+
Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems, 33:9459-9474.
|
| 275 |
+
Xiangyan Liu, Bo Lan, Zhiyuan Hu, Yang Liu, Zhicheng Zhang, Fei Wang, Michael Shieh, and Wenmeng Zhou. 2024. Codexgraph: Bridging large language models and code repositories via code graph databases. Preprint, arXiv:2408.03910.
|
| 276 |
+
Qingsong Lv, Ming Ding, Qiang Liu, Yuxiang Chen, Wenzheng Feng, Siming He, Chang Zhou, Jianguo Jiang, Yuxiao Dong, and Jie Tang. 2021. Are we really making much progress? revisiting, benchmarking and refining heterogeneous graph neural networks. In Proceedings of the 27th ACM SIGKDD conference on knowledge discovery & data mining, pages 1150-1160.
|
| 277 |
+
Yingwei Ma, Qingping Yang, Rongyu Cao, Binhua Li, Fei Huang, and Yongbin Li. 2024. How to understand whole software repository? arXiv e-prints, pages arXiv-2406.
|
| 278 |
+
Niels Mündler, Mark Müller, Jingxuan He, and Martin Vechev. 2024. Swt-bench: Testing and validating real-world bug-fixes with code agents. Advances in Neural Information Processing Systems, 37:81857-81887.
|
| 279 |
+
OpenAI. 2023. Chatgpt: Language model by openai. Accessed: January 21, 2025.
|
| 280 |
+
Siru Ouyang, Wenhao Yu, Kaixin Ma, Zilin Xiao, Zhihan Zhang, Mengzhao Jia, Jiawei Han, Hongming Zhang, and Dong Yu. 2025. Repograph: Enhancing AI software engineering with repository-level code graph. In The Thirteenth International Conference on Learning Representations.
|
| 281 |
+
PerplexityAI. 2023. Perplexity ai: An ai-powered search engine. Accessed: January 21, 2025.
|
| 282 |
+
Yihao Qin, Shangwen Wang, Yiling Lou, Jinhao Dong, Kaixin Wang, Xiaoling Li, and Xiaoguang Mao. 2024. Agentfl: Scaling llm-based fault localization to project-level context. arXiv preprint arXiv:2403.16362.
|
| 283 |
+
|
| 284 |
+
Chen Qu, Liu Yang, Cen Chen, Minghui Qiu, W Bruce Croft, and Mohit Iyyer. 2020. Open-retrieval conversational question answering. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval, pages 539-548.
|
| 285 |
+
Stephen Robertson, Hugo Zaragoza, et al. 2009. The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 3(4):333-389.
|
| 286 |
+
Stephen E. Robertson, Steve Walker, Susan Jones, Micheline Hancock-Beaulieu, and Mike Gatford. 1994. Okapi at trec-3. In Text Retrieval Conference.
|
| 287 |
+
Tarun Suresh, Revanth Gangi Reddy, Yifei Xu, Zach Nussbaum, Andriy Mulyar, Brandon Duderstadt, and Heng Ji. 2024. Cornstack: High-quality contrastive data for better code ranking. arXiv preprint arXiv:2412.01007.
|
| 288 |
+
David A. Tomassi, Naji Dmeiri, Yichen Wang, Antara Bhowmick, Yen-Chuan Liu, Premkumar Devanbu, Bogdan Vasilescu, and Cindy Rubio-Gonzalez. 2019. Bugswarm: Mining and continuously growing a dataset of reproducible failures and fixes. Preprint, arXiv:1903.06725.
|
| 289 |
+
VoyageAI. 2024. Voyage-code-2: Elevate your code retrieval. Accessed: 2024-02-02.
|
| 290 |
+
Liang Wang, Nan Yang, Xiaolong Huang, Bixing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. 2022. Text embeddings by weakly-supervised contrastive pre-training. arXiv preprint arXiv:2212.03533.
|
| 291 |
+
Xingyao Wang, Boxuan Li, Yufan Song, Frank F. Xu, Xiangru Tang, Mingchen Zhuge, Jiayi Pan, Yueqi Song, Bowen Li, Jaskirat Singh, Hoang H. Tran, Fuqiang Li, Ren Ma, Mingzhang Zheng, Bill Qian, Yanjun Shao, Niklas Muennighoff, Yizhe Zhang, Binyuan Hui, Junyang Lin, Robert Brennan, Hao Peng, Heng Ji, and Graham Neubig. 2025. Open hands: An open platform for AI software developers as generalist agents. In The Thirteenth International Conference on Learning Representations.
|
| 292 |
+
Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023a. Self-consistency improves chain of thought reasoning in language models. Preprint, arXiv:2203.11171.
|
| 293 |
+
Yue Wang, Hung Le, Akhilesh Deepak Gotmare, Nghi D. Q. Bui, Junnan Li, and Steven C. H. Hoi. 2023b. Codet5+: Open code large language models for code understanding and generation. Preprint, arXiv:2305.07922.
|
| 294 |
+
Zora Zhiruo Wang, Akari Asai, Xinyan Velocity Yu, Frank F. Xu, Yiqing Xie, Graham Neubig, and Daniel Fried. 2024. Coderag-bench: Can retrieval augment code generation? Preprint, arXiv:2406.14497.
|
| 295 |
+
|
| 296 |
+
Yonghao Wu, Zheng Li, Jie M Zhang, Mike Papadakis, Mark Harman, and Yong Liu. 2023. Large language models in fault localisation. arXiv preprint arXiv:2308.15276.
|
| 297 |
+
Chunqiu Steven Xia, Yinlin Deng, Soren Dunn, and Lingming Zhang. 2024. Agentless: Demystifying llm-based software engineering agents. arXiv preprint arXiv:2407.01489.
|
| 298 |
+
John Yang, Carlos E Jimenez, Alexander Wettig, Kilian Lieret, Shunyu Yao, Karthik Narasimhan, and Ofir Press. 2024. Swe-agent: Agent-computer interfaces enable automated software engineering. arXiv preprint arXiv:2405.15793.
|
| 299 |
+
Zhongming Yu, Hejia Zhang, Yujie Zhao, Hanxian Huang, Matrix Yao, Ke Ding, and Jishen Zhao. 2025. Orcaloca: An llm agent framework for software issue localization. arXiv preprint arXiv:2502.00350.
|
| 300 |
+
Dejiao Zhang, Wasi Uddin Ahmad, Ming Tan, Hantian Ding, Ramesh Nallapati, Dan Roth, Xiaofei Ma, and Bing Xiang. 2024. CODE REPRESENTATION LEARNING AT SCALE. In The Twelfth International Conference on Learning Representations.
|
| 301 |
+
Albert Örwall. 2024. Moatless tools.
|
| 302 |
+
|
| 303 |
+
# A LOCAGENT Design Details
|
| 304 |
+
|
| 305 |
+
# A.1 Tool Output Design
|
| 306 |
+
|
| 307 |
+
# A.1.1 Three-level format for SearchEntity output
|
| 308 |
+
|
| 309 |
+
Once invoked by the LLM agent, the retrieval APIs search for files, classes, methods, and code snippets in the codebase, and return the results back to the agent. To avoid forming very lengthy code context that may contain noisy information for the LLM, we return only the necessary information as API outputs. To achieve this, we designed three granular standard output formats (Figure 6): fold, preview, and full code.
|
| 310 |
+
|
| 311 |
+
# A.1.2 Tree-based Subgraph Formatting for TraverseGraph Output
|
| 312 |
+
|
| 313 |
+
The TraverseGraph tool traverses the code graph and returns a local subgraph for each input entity. The agent reasons about these subgraphs to understand each entity's complex dependencies. However, reasoning about graphs remains challenging for LLMs. Research by (Fatemi et al., 2023) demonstrates that LLM performance varies significantly based on graph formatting (how graphs are encoded as text). This makes the format design for output subgraphs crucial.
|
| 314 |
+
|
| 315 |
+
We have developed a new tree-based format, shown in Figure 7, with several features that enhance LLM reasoning: (1) We represent subgraphs as trees, allowing LLMs to use indentation to determine a node's distance from the root, (2) We display complete entity IDs for each node (e.g., django/core/validators.py:RegexValidator) to help LLMs locate nodes easily, and (3) We explicitly specify relation types for each edge, including reversed relations.
|
| 316 |
+
|
| 317 |
+
To evaluate how different graph formats impact code localization performance, we conducted an experiment using 37 challenging samples from SWEBench-Lite. These samples were considered "challenging" because they could not be solved by any baseline agent methods. Using Claude-3.5 as the Localization Model across all settings, we compared various output formats. Table 9 presents our findings. The baseline output formats we tested are described below:
|
| 318 |
+
|
| 319 |
+
- row: For each line, list one row of the adjacency matrix. For example,
|
| 320 |
+
|
| 321 |
+
function "fileA.py:funcA" invokes function "fileA.py:funcB", "fileA.py:funcC"
|
| 322 |
+
|
| 323 |
+
- row (w/ entity attributes): Additionally include entity attributes for format row.
|
| 324 |
+
- incident: The incident format mentioned in (Fatemi et al., 2023). An integer instead of entity ID is used to represent each node. For example,
|
| 325 |
+
|
| 326 |
+
Map function "fileA.py:funcA" to index 0. Map function "fileA.py:funcB" to index 1. Map function "fileA.py:funcC" to index 2.
|
| 327 |
+
|
| 328 |
+
function 0 invokes function 1, 2.
|
| 329 |
+
|
| 330 |
+
Graphviz DOT: Represent graph in Graphviz DOT language (Ellson et al., 2002).
|
| 331 |
+
- JSON: Expand the subgraph as a tree, and convert it to JSON format.
|
| 332 |
+
|
| 333 |
+
As shown in Table 9, expanding subgraphs as trees (i.e., JSON, tree-based) can significantly improve the performance. Our tree-based format achieves the best overall performance across different levels of localization tasks. We also test returning entity attributes along with subgraphs. We notice that row (w/ entity attributes) consistently underperforms row, indicating that the attributes for all the nodes may be very noisy. Besides, although using the incident format can simplify the output and shows improvements in file-level localization, it degrades the module- and function-level localization.
|
| 334 |
+
|
| 335 |
+
# A.2 Implementation
|
| 336 |
+
|
| 337 |
+
To enable the LLM agent to invoke the Code Localization APIs, we handle the interaction differently based on the LLM's capabilities. For LLMs that support tool-calling features, we define the tools as a list of JSON objects, which are then used as parameters for the API calls. For LLMs that do not support tool-calling (such as Qwen), we provide the description of the API and the expected output as part of the LLM's prompt. When the agent decides to invoke a set of retrieval APIs, it responds with a list of API call names and their corresponding arguments. These retrieval API requests are processed locally by searching over the built code graph. The results from executing these APIs locally are returned to the agent.
|
| 338 |
+
|
| 339 |
+
By default, we query the LLM with a temperature setting of 1.0. We conduct two interactions, after which we rerank the results based on mean reciprocal rank (MRR) scores. We also leverage multiprocessing execution to speed up the process. Since all our tools are read-only, LOCAGENT does
|
| 340 |
+
|
| 341 |
+

|
| 342 |
+
Figure 6: Different output formats designed for efficient agent-code interaction. Left: Full code output when matched entities $\leq 3$ . Middle: Preview output showing module skeleton for large files. Right: Fold output showing only entity IDs when matches $>3$ .
|
| 343 |
+
|
| 344 |
+

|
| 345 |
+
|
| 346 |
+

|
| 347 |
+
|
| 348 |
+

|
| 349 |
+
Figure 7: A truncated example of the expanded tree-based format for the output subgraph of tool TraverseGraph.
|
| 350 |
+
|
| 351 |
+
not require a specialized Docker environment to operate.
|
| 352 |
+
|
| 353 |
+
# B Dataset construction and statistics
|
| 354 |
+
|
| 355 |
+
# B.1 Dataset construction details
|
| 356 |
+
|
| 357 |
+
Example collection. We collected examples from popular Python repositories on GitHub following (Jimenez et al., 2023). To gather issues related to performance and security, we searched for the keywords listed in Table 10 using the GitHub Search APIs. We then used GPT-4o-2024-0513 as the classifier based on the issue descriptions.
|
| 358 |
+
|
| 359 |
+
Ground Truth Locations. The affected files or functions in the original codebase, as identified in
|
| 360 |
+
|
| 361 |
+
the patches, are considered the target locations for the given issue. While it is possible to fix a bug in a location different from the ground truth, the extracted ground-truth locations still serve as approximate targets for localization. Additionally, edited code such as documents, import statements, and comments are excluded from the localization target. These elements are not considered relevant for bug localization, as they do not directly impact the functionality of the code or its execution. By filtering out these elements, the focus is maintained on the core code changes that are relevant for localization.
|
| 362 |
+
|
| 363 |
+
<table><tr><td rowspan="2">Output Format</td><td colspan="3">File(%)</td><td colspan="2">Module(%)</td><td colspan="2">Function(%)</td></tr><tr><td>Acc@1</td><td>Acc@3</td><td>Acc@5</td><td>Acc@5</td><td>Acc@10</td><td>Acc@5</td><td>Acc@10</td></tr><tr><td>row</td><td>41.18</td><td>67.65</td><td>70.59</td><td>61.76</td><td>61.76</td><td>35.29</td><td>38.24</td></tr><tr><td>row (w/ entity attributes)</td><td>41.18</td><td>64.71</td><td>64.71</td><td>50.00</td><td>50.00</td><td>32.35</td><td>32.35</td></tr><tr><td>incident</td><td>41.18</td><td>70.59</td><td>73.53</td><td>55.88</td><td>55.88</td><td>29.41</td><td>32.35</td></tr><tr><td>Graphviz DOT</td><td>41.18</td><td>73.53</td><td>82.35</td><td>64.71</td><td>64.71</td><td>35.29</td><td>35.29</td></tr><tr><td>JSON</td><td>41.18</td><td>67.65</td><td>76.47</td><td>67.65</td><td>70.59</td><td>38.24</td><td>41.18</td></tr><tr><td>tree-based (Ours)</td><td>47.06</td><td>79.41</td><td>79.41</td><td>64.71</td><td>64.71</td><td>38.24</td><td>41.18</td></tr></table>
|
| 364 |
+
|
| 365 |
+
Table 9: Localization performance under different TraverseGraph output formats.
|
| 366 |
+
|
| 367 |
+
<table><tr><td>Category</td><td>Keywords</td></tr><tr><td>Performance</td><td>bottleneck, performance improvement, memory usage optimization, time complexity reduction, latency improvement, scalability improvement, CPU usage reduction, caching improvement, concurrency optimization</td></tr><tr><td>Security</td><td>Out-of-bounds Write, Out-of-bounds Read, NULL Pointer Dereference, Missing Authorization, memory leak fix, security vulnerability, security issue, authentication bypass, authentication issue, better maintained, buffer overflow, denial of service, security hardening, security patch, unsafe deserialization, Use After Free, Integer Overflow or Wraparound, Uncontrolled Resource Consumption, Missing Authentication for Critical Function</td></tr></table>
|
| 368 |
+
|
| 369 |
+
Table 10: We use these Keywords to search for Performance and Security related issues with Github Search APIs.
|
| 370 |
+
|
| 371 |
+
# C Additional Experiments
|
| 372 |
+
|
| 373 |
+
# C.1 Implementation Details
|
| 374 |
+
|
| 375 |
+
# C.1.1 Baselines Implementation
|
| 376 |
+
|
| 377 |
+
Regarding the embedding-based methods in our evaluation, these approaches operate primarily at the function level, where each function is embedded as a separate unit. The function's context (its containing file and class) is appended to the function representation before embedding, rather than being embedded separately. While theoretically these methods could employ hierarchical indexing, the standard implementations we evaluated use flat indexing structures where each function is embedded as a single unit.
|
| 378 |
+
|
| 379 |
+
We use OpenHands's remote runtime feature to parallelize evaluation on OpenHands and SWE-agent. We use OpenHands version 0.12.0 released on Oct 31, 2024.
|
| 380 |
+
|
| 381 |
+
# C.1.2 Quantifying Task Difficulty Based on Code Graph Distance
|
| 382 |
+
|
| 383 |
+
We measure task difficulty by computing the average shortest hop distance between the functions mentioned in the issue descriptions and the patched functions within our code graph. Specifically, we first extract potential function names from each
|
| 384 |
+
|
| 385 |
+
issue description using GPT-4o-2024-0513, and identify their corresponding nodes in the code graph using the global dictionary. These identified nodes form the set of predicted nodes, denoted as $\mathcal{C}$ . Similarly, we link the ground truth functions from the patch to their corresponding nodes in the code graph, forming the set of target nodes, denoted as $\mathcal{T}$ . To quantify the difficulty $\delta$ , we calculate the average shortest hop distance between the predicted nodes $\mathcal{C}$ and the target nodes $\mathcal{T}$ , defined as:
|
| 386 |
+
|
| 387 |
+
$$
|
| 388 |
+
\delta = \frac {1}{| \mathcal {C} |} \sum_ {c \in \mathcal {C}} \frac {1}{m i n _ {t \in \mathcal {T}} d (c , t) + 1}
|
| 389 |
+
$$
|
| 390 |
+
|
| 391 |
+
where $d(c, t)$ represents the shortest hop distance between nodes $c$ and $t$ in the graph. For performance analysis stratified by difficulty, we round $\delta$ down to $\lfloor \delta \rfloor$ to group samples by difficulty levels, and we exclude samples where the LLM fails to extract any valid function names.
|
| 392 |
+
|
| 393 |
+
# C.1.3 Training details.
|
| 394 |
+
|
| 395 |
+
Fine-tuning Settings. We use Qwen-2.5-Coder-Instruct (Hui et al., 2024) 7B and 32B variants as our base models. We fine-tuned Qwen-2.5-Coder-Instruct 7B and 32B models on 768 training samples from the SWE-Bench training dataset, leveraging LoRA
|
| 396 |
+
|
| 397 |
+
<table><tr><td rowspan="2">Type</td><td rowspan="2">Method</td><td rowspan="2">Loc-Model</td><td colspan="3">File (%)</td><td colspan="2">Module (%)</td><td colspan="2">Function (%)</td></tr><tr><td>NDCG@1</td><td>NDCG@3</td><td>NDCG@5</td><td>NDCG@5</td><td>NDCG@10</td><td>NDCG@5</td><td>NDCG@10</td></tr><tr><td rowspan="5">Embedding-Based</td><td>BM25 (Robertson et al., 2009)</td><td>38.69</td><td>46.5</td><td>50.61</td><td>37.31</td><td>39.86</td><td>26.15</td><td>27.92</td><td></td></tr><tr><td>E5-base-v2 (Wang et al., 2022)</td><td>49.64</td><td>64.19</td><td>66.6</td><td>53.15</td><td>54.45</td><td>31.39</td><td>35.3</td><td></td></tr><tr><td>Jina-Code-v2 (Günther et al., 2023)</td><td>43.43</td><td>59.93</td><td>63.7</td><td>51.02</td><td>54.13</td><td>33.28</td><td>36.44</td><td></td></tr><tr><td>Codesage-large-v2 (Zhang et al., 2024)</td><td>47.81</td><td>60.82</td><td>64.39</td><td>49.38</td><td>52.22</td><td>27.03</td><td>30.74</td><td></td></tr><tr><td>CodeRankEmbed (Suresh et al., 2024)</td><td>52.55</td><td>67.54</td><td>70.39</td><td>57.51</td><td>59.76</td><td>40.28</td><td>42.55</td><td></td></tr><tr><td rowspan="2">Procedure-Based</td><td rowspan="2">Agentless (Xia et al., 2024)</td><td>GPT-4o</td><td>67.15</td><td>71.76</td><td>71.76</td><td>64.31</td><td>64.31</td><td>53.81</td><td>53.81</td></tr><tr><td>Claude-3.5</td><td>72.63</td><td>76.72</td><td>76.87</td><td>67.36</td><td>67.36</td><td>57.55</td><td>57.55</td></tr><tr><td rowspan="9">Agent-Based</td><td rowspan="2">MoatlessTools (Örwall, 2024)</td><td>GPT-4o</td><td>73.36</td><td>80.03</td><td>80.33</td><td>68.57</td><td>69.09</td><td>49.77</td><td>50.62</td></tr><tr><td>Claude-3.5</td><td>72.63</td><td>80.73</td><td>80.88</td><td>69.11</td><td>69.11</td><td>53.03</td><td>53.16</td></tr><tr><td rowspan="2">SWE-agent (Yang et al., 
2024)</td><td>GPT-4o</td><td>57.3</td><td>63.96</td><td>64.12</td><td>53.95</td><td>53.95</td><td>42.32</td><td>42.44</td></tr><tr><td>Claude-3.5</td><td>77.37</td><td>84.32</td><td>84.93</td><td>72.77</td><td>72.9</td><td>59.67</td><td>59.79</td></tr><tr><td rowspan="2">OpenHands (Wang et al., 2025)</td><td>GPT-4o</td><td>60.95</td><td>67.62</td><td>68.39</td><td>58.18</td><td>58.6</td><td>44.34</td><td>44.66</td></tr><tr><td>Claude-3.5</td><td>76.28</td><td>84.27</td><td>84.43</td><td>75.79</td><td>75.92</td><td>63.13</td><td>63.8</td></tr><tr><td rowspan="3">LocAgent (Ours)</td><td>Qwen2.5-7B(ft)</td><td>70.80</td><td>79.36</td><td>80.9</td><td>70.99</td><td>71.68</td><td>55.62</td><td>58.09</td></tr><tr><td>Qwen2.5-32B(ft)</td><td>75.91</td><td>84.74</td><td>85.64</td><td>76.28</td><td>76.77</td><td>64.27</td><td>65.93</td></tr><tr><td>Claude-3.5</td><td>77.74</td><td>86.19</td><td>87.14</td><td>77.73</td><td>78.1</td><td>64.34</td><td>65.57</td></tr></table>
|
| 398 |
+
|
| 399 |
+
Table 11: NDCG scores comparison showing ranking quality of different methods.
|
| 400 |
+
|
| 401 |
+
for efficient adaptation. The training set included 447 samples generated by Claude-3.5, while the remaining samples were iteratively generated using the fine-tuned Qwen2.5-32B model. The fine-tuning process was conducted over 5 epochs with max_token set to $128k$ and a learning rate of $2 \times 10^{-4}$ .
|
| 402 |
+
|
| 403 |
+
# D Prompt
|
| 404 |
+
|
| 405 |
+
In this section, we go through the prompt template that makes up the agent's history.
|
| 406 |
+
|
| 407 |
+
# Prompt
|
| 408 |
+
|
| 409 |
+
Given the following GitHub problem description, your objective is to localize the specific files, classes or functions, and lines of code that need modification or contain key information to resolve the issue.
|
| 410 |
+
|
| 411 |
+
Follow these steps to localize the issue:
|
| 412 |
+
|
| 413 |
+
Step 1: Categorize and Extract Key Problem Information
|
| 414 |
+
|
| 415 |
+
- Classify the problem statement into the following categories:
|
| 416 |
+
Problem description, error trace, code to reproduce the bug, and additional context.
|
| 417 |
+
- Identify modules in the '{package_name}' package mentioned in each category.
|
| 418 |
+
- Use extracted keywords and line numbers to search for relevant code references for additional context.
|
| 419 |
+
|
| 420 |
+
Step 2: Locate Referenced Modules
|
| 421 |
+
|
| 422 |
+
Accurately determine specific modules
|
| 423 |
+
|
| 424 |
+
- Explore the repo to familiarize yourself with its structure.
|
| 425 |
+
|
| 426 |
+
- Analyze the described execution flow to identify specific modules or components being referenced.
|
| 427 |
+
|
| 428 |
+
- Pay special attention to distinguishing between modules with similar names using context and described execution flow.
|
| 429 |
+
|
| 430 |
+
- Output Format for collected relevant modules:
|
| 431 |
+
|
| 432 |
+
- Use the format: 'file path:QualifiedName'
|
| 433 |
+
|
| 434 |
+
- E.g., for a function `calculate_sum` in the `MathUtilities` class located in `src/helpers/mathHelpers.py`, represent it as:
|
| 435 |
+
|
| 436 |
+
'src/helpers/mathHelpers.py:MathUtilities.calculate_sum'.
|
| 437 |
+
|
| 438 |
+
## Step 3: Analyze and Reproduce the Problem
|
| 439 |
+
|
| 440 |
+
Clarify the Purpose of the Issue
|
| 441 |
+
|
| 442 |
+
- If expanding capabilities: Identify where and how to incorporate new behavior, fields, or modules.
|
| 443 |
+
|
| 444 |
+
- If addressing unexpected behavior: Focus on localizing modules containing potential bugs.
|
| 445 |
+
|
| 446 |
+
- Reconstruct the execution flow
|
| 447 |
+
|
| 448 |
+
- Identify main entry points triggering the issue.
|
| 449 |
+
|
| 450 |
+
- Trace function calls, class interactions, and sequences of events.
|
| 451 |
+
|
| 452 |
+
- Identify potential breakpoints causing the issue.
|
| 453 |
+
|
| 454 |
+
Important: Keep the reconstructed flow focused on the problem, avoiding irrelevant details.
|
| 455 |
+
|
| 456 |
+
## Step 4: Locate Areas for Modification
|
| 457 |
+
|
| 458 |
+
- Locate specific files, functions, or lines of code requiring changes or containing critical information for resolving the issue.
|
| 459 |
+
|
| 460 |
+
- Consider upstream and downstream dependencies that may affect or be affected by the issue.
|
| 461 |
+
|
| 462 |
+
- If applicable, identify where to introduce new fields, functions, or variables.
|
| 463 |
+
|
| 464 |
+
- Think Thoroughly: List multiple potential solutions and consider edge cases that could impact the resolution.
|
| 465 |
+
|
| 466 |
+
Output Format for Final Results:
|
| 467 |
+
|
| 468 |
+
Your final output should list the locations requiring modification, wrapped with triple back ticks
|
| 469 |
+
|
| 470 |
+
Each location should include the file path, class name (if applicable), function name, or line numbers, ordered by importance.
|
| 471 |
+
|
| 472 |
+
Your answer would better include about 5 files.
|
| 473 |
+
|
| 474 |
+
Examples:
|
| 475 |
+
|
| 476 |
+
full_path1/file1.py
|
| 477 |
+
|
| 478 |
+
line: 10
|
| 479 |
+
|
| 480 |
+
class: MyClass1
|
| 481 |
+
|
| 482 |
+
function: my_function1
|
| 483 |
+
|
| 484 |
+
full path2/file2.py
|
| 485 |
+
|
| 486 |
+
line:76
|
| 487 |
+
|
| 488 |
+
function: MyClass2.my_function2
|
| 489 |
+
|
| 490 |
+
full_path3/file3.py
|
| 491 |
+
|
| 492 |
+
line: 24
|
| 493 |
+
|
| 494 |
+
line: 156
|
| 495 |
+
|
| 496 |
+
function: my_function3
|
| 497 |
+
|
| 498 |
+
#
|
| 499 |
+
|
| 500 |
+
Return just the location(s)
|
| 501 |
+
|
| 502 |
+
Note: Your thinking should be thorough and so it's fine if it's very long.
|
| 503 |
+
|
| 504 |
+
Figure 8: The task instruction prompt for LOCAGENT.
|
data/2025/2503_09xxx/2503.09089/images/003dcde246a439f5b9b36cc33df7c37daaa5e4eaf0478ed30be80b30cbe72965.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/2976dea7e0ff07dfee13b430fc8f8efa94af68a9c7230eb416ed229809e5c751.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/3501fac23dcf5fe773840a66ad8f7737fdc4bb7268e280936b833deb06905a3d.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/3ee9190f79ad7623fb4b180523bd53a7f66d2934c043f7c4febdadae07f9b9c0.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/40113cdec1e1feca7000cb77d3a5294a6135d63a0d343288699aecc05a7b7a58.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/503072355158697ea6cb6ac7e2712ec0ffc412f1cd70207465e4cb36e21f3c9b.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/594e4e848668151fa0f0d585dcf3a37fcc51677d8a82c5fdceaf97947d73c7bb.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/65aa2b242676fd4ce7ba9e60ceadb537140f50dff384a8c6651710edad591512.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/68d78298f4ef4a0462b402972796e7a35e59e5ec5b78ccafd6cd08b74dd8ad0d.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/6c9737e3fa48ebc0fa6a563806676ce4a098be8490a88f39f510258c9a12539f.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/86b464c56ef20eb2e4a58b1077f46f88aa113910e37ea427df9e03b94670489d.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/ad9888427e4959ea8df866a9f22f434e24d97aa01a98851cc1f0d57d84fed76f.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/c20269c8cebd98330e89168b5cf72fa71b25a1845eac8027149a6e7bbe018c9f.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/ca58188c6fe4c2ffec34aeccdb6ee5a73bdf63c458fea192f02d6865ca76db6a.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/cbbf57fce918f5d0e7521a35c3e24e9a0612bad18584b2403a531e9d40562501.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/cd90b427caeb94e68300cb0963a3939e90034375ecc1b70e22bb7ba12b53914b.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/d58f8658622664aa6fdd9f0ba4233e824e52755bf040929bc8346fe186a5d5e3.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/dd0a043ec99e4aa7d3cdd9cab21f0f8c8ff70d258cc383c3b7961f81be3880dc.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/dffede0cb6e1140d33ef35874b6f55be329c72cec08ada3fd8a7e8684a261136.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/e3da4d4339e99b1a14d6f8d73ea975c643cc44ad95a8dc1803fff42294d4f99a.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/eff607d6667dc8fa01afca421fe5518165076b8c38bf5f9855a1411d560992d1.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/f3ff500f60d006d9ab2100b2e34cdaaed024cc944a6d5f1861937bb334d1a3cf.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/images/f929dd53f4adf74c47eab0524e9c10d9df2c8753a81e68bd756d7c86d5d4876b.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09089/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_content_list.json
ADDED
|
@@ -0,0 +1,1213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "A 3D particle visualization system for temperature management",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
194,
|
| 8 |
+
112,
|
| 9 |
+
803,
|
| 10 |
+
175
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Lange B. $^{a}$ , Rodriguez N. $^{a}$ , Puech W. $^{a}$ , Rey H. $^{b}$ and Vasques X. $^{b}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
333,
|
| 19 |
+
200,
|
| 20 |
+
661,
|
| 21 |
+
233
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "$^{a}$ LIRMM, 141 rue ADA, Montpellier, France;",
|
| 28 |
+
"bbox": [
|
| 29 |
+
330,
|
| 30 |
+
234,
|
| 31 |
+
666,
|
| 32 |
+
251
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "<sup>b</sup> IBM, Rue de la vieille poste, Montpellier,",
|
| 39 |
+
"bbox": [
|
| 40 |
+
338,
|
| 41 |
+
252,
|
| 42 |
+
658,
|
| 43 |
+
268
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "France",
|
| 50 |
+
"bbox": [
|
| 51 |
+
470,
|
| 52 |
+
270,
|
| 53 |
+
527,
|
| 54 |
+
284
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "Abstract",
|
| 61 |
+
"text_level": 1,
|
| 62 |
+
"bbox": [
|
| 63 |
+
465,
|
| 64 |
+
314,
|
| 65 |
+
531,
|
| 66 |
+
328
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "This paper deals with a 3D visualization technique proposed to analyze and manage energy efficiency from a data center. Data are extracted from sensors located in the IBM Green Data Center in Montpellier France. These sensors measure different information such as hygrometry, pressure and temperature. We want to visualize in real-time the large among of data produced by these sensors. A visualization engine has been designed, based on particles system and a client server paradigm. In order to solve performance problems, a Level Of Detail solution has been developed. These methods are based on the earlier work introduced by J. Clark in $1976^{1}$ . In this paper we introduce a particle method used for this work and subsequently we explain different simplification methods applied to improve our solution.",
|
| 73 |
+
"bbox": [
|
| 74 |
+
214,
|
| 75 |
+
337,
|
| 76 |
+
785,
|
| 77 |
+
513
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "text",
|
| 83 |
+
"text": "Keywords: 3D Visualization, Sensors, Particles, Client/Server, Level Of Details",
|
| 84 |
+
"bbox": [
|
| 85 |
+
143,
|
| 86 |
+
526,
|
| 87 |
+
733,
|
| 88 |
+
541
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "text",
|
| 94 |
+
"text": "1. INTRODUCTION",
|
| 95 |
+
"text_level": 1,
|
| 96 |
+
"bbox": [
|
| 97 |
+
145,
|
| 98 |
+
575,
|
| 99 |
+
370,
|
| 100 |
+
595
|
| 101 |
+
],
|
| 102 |
+
"page_idx": 0
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"type": "text",
|
| 106 |
+
"text": "In this paper, we present a method to produce a 3D visualization for analyzing and managing temperature. Data are extracted from sensors located in the IBM Green Data Center in Montpellier, which provides many different types of information like temperature, pressure or hygrometry. In our system, sensors are placed in a virtual room and the internal space is modeled using particles. The main constraint here is to produce a real-time rendering. However, latency appears du to the number of vertices. In this paper, we use a solution called LOD (Level Of Detail) to produce multi resolution 3D objects. This solution has been introduced in 1976 by J. Clark<sup>1</sup>. In this paper, J. Clark introduces the use of several mesh resolutions to simplify the 3D scene complexity. In our work, we use various simplification methods to provide interactive rendering and allows rendering the most important part of data extracted from sensors. In this paper, we describe how we create a room, and the methods used to produce different resolution visualization. In Section 2, we introduce related work on particles systems and LOD. In Section 3, we expose our solution to simplify particles system. In Section 4 we give some results and finally, in Section 5 we present our conclusions and future work.",
|
| 107 |
+
"bbox": [
|
| 108 |
+
142,
|
| 109 |
+
609,
|
| 110 |
+
841,
|
| 111 |
+
864
|
| 112 |
+
],
|
| 113 |
+
"page_idx": 0
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"type": "page_number",
|
| 117 |
+
"text": "1/10",
|
| 118 |
+
"bbox": [
|
| 119 |
+
483,
|
| 120 |
+
970,
|
| 121 |
+
514,
|
| 122 |
+
979
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 0
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "text",
|
| 128 |
+
"text": "2. RELATED WORK",
|
| 129 |
+
"text_level": 1,
|
| 130 |
+
"bbox": [
|
| 131 |
+
145,
|
| 132 |
+
69,
|
| 133 |
+
375,
|
| 134 |
+
88
|
| 135 |
+
],
|
| 136 |
+
"page_idx": 1
|
| 137 |
+
},
|
| 138 |
+
{
|
| 139 |
+
"type": "text",
|
| 140 |
+
"text": "In this section we present several previous works concerning data visualization, particle systems and level of detail methods.",
|
| 141 |
+
"bbox": [
|
| 142 |
+
143,
|
| 143 |
+
104,
|
| 144 |
+
828,
|
| 145 |
+
136
|
| 146 |
+
],
|
| 147 |
+
"page_idx": 1
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"type": "text",
|
| 151 |
+
"text": "Some previous work present solutions to visualize large data flow extracted from mantle convection. M. Damon et al. $^{2}$ and K. E. Jordan et al. $^{3}$ present interactive viewers for this kind of data. These data are computed by using Hight Performance Computing (HPC) and visualized on a large display. The rendering is calculated by using another HPC. The data flow is very important and a real-time 3D simulation is hard to obtain. W. Kapfer and",
|
| 152 |
+
"bbox": [
|
| 153 |
+
143,
|
| 154 |
+
138,
|
| 155 |
+
836,
|
| 156 |
+
222
|
| 157 |
+
],
|
| 158 |
+
"page_idx": 1
|
| 159 |
+
},
|
| 160 |
+
{
|
| 161 |
+
"type": "text",
|
| 162 |
+
"text": "T. Riser<sup>6</sup> introduce how to use particle system to visualize astronomic simulation, particles representing space objects. The number of particles is extremely important for computing motion in real-time. GPU computing is preferred to render instead of a common HPC solution. To display their data, they have developed their own 3D graphical engine. The space objects are represented by point sprite instead of sphere. Lights are used to give a spherical aspect to the point sprite. This solution allows to render more stars than spherical object method. The 3D engine provides different rendering methods to group space objects: cell simplification or extraction of isosurface. The use of GPU seems quite well for a particle solution, parallel processing allows to render large data; the astrological data seems to be well suited.",
|
| 163 |
+
"bbox": [
|
| 164 |
+
143,
|
| 165 |
+
234,
|
| 166 |
+
851,
|
| 167 |
+
404
|
| 168 |
+
],
|
| 169 |
+
"page_idx": 1
|
| 170 |
+
},
|
| 171 |
+
{
|
| 172 |
+
"type": "text",
|
| 173 |
+
"text": "In 1976, J. Clark introduces Level Of Detail (LOD) concept<sup>1</sup>. LOD consists of several resolution meshes for using them at different distances from the camera. Firstly, designer produces these meshes. First algorithms, in 1992 Schroeder et al. developed a method by decimation for simplify the mesh<sup>7</sup>. It analyses mesh geometry and evaluates the complexity of triangles. Vertices are removed if only constraints set by the user are respected. Vertices are removed and gaps are filled using triangulation. These algorithms of simplification are not enough to simplify mesh efficiently because shape is not always totally respected. D. Luebke, in 1997, has proposed a taxonomy of mesh simplification<sup>8</sup>. He presented the most used algorithms. He extracted different ways to use each algorithm. But in this paper, only one solution works with volumetric mesh<sup>9</sup>. T. He et al. propose a method based on voxel simplification by using a grid for clustering voxels. A marching cube<sup>10</sup> algorithm was applied to produce a surface mesh. But this simplification algorithm did not preserve the shape of the mesh. In our work, we look for point cloud simplification. Indeed, previous methods which deal with simplification for surface point cloud like<sup>11-13</sup> are not adapted to our case. All of these methods produce LOD for surface mesh and point cloud is extracted from scanner.",
|
| 174 |
+
"bbox": [
|
| 175 |
+
143,
|
| 176 |
+
406,
|
| 177 |
+
851,
|
| 178 |
+
676
|
| 179 |
+
],
|
| 180 |
+
"page_idx": 1
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"type": "text",
|
| 184 |
+
"text": "3. PROPOSED APPROACH",
|
| 185 |
+
"text_level": 1,
|
| 186 |
+
"bbox": [
|
| 187 |
+
145,
|
| 188 |
+
710,
|
| 189 |
+
452,
|
| 190 |
+
729
|
| 191 |
+
],
|
| 192 |
+
"page_idx": 1
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"type": "text",
|
| 196 |
+
"text": "This section presents the different methods that are used to visualize a kind of data from Green Data Center (GDC). The main goal is to be able to visualize in real-time the evolution of temperature in the data center. For this, we use a special particle method. Particles are located using a segmentation algorithm based on Voronoi cell extraction and Delaunay triangulation. The latency due to the large flow of particles is avoided by using a client server paradigm. We improve our solution by using LOD methods to simplify rendering.",
|
| 197 |
+
"bbox": [
|
| 198 |
+
143,
|
| 199 |
+
744,
|
| 200 |
+
844,
|
| 201 |
+
847
|
| 202 |
+
],
|
| 203 |
+
"page_idx": 1
|
| 204 |
+
},
|
| 205 |
+
{
|
| 206 |
+
"type": "page_number",
|
| 207 |
+
"text": "2/10",
|
| 208 |
+
"bbox": [
|
| 209 |
+
483,
|
| 210 |
+
970,
|
| 211 |
+
514,
|
| 212 |
+
979
|
| 213 |
+
],
|
| 214 |
+
"page_idx": 1
|
| 215 |
+
},
|
| 216 |
+
{
|
| 217 |
+
"type": "text",
|
| 218 |
+
"text": "3.1 Particle systems",
|
| 219 |
+
"text_level": 1,
|
| 220 |
+
"bbox": [
|
| 221 |
+
142,
|
| 222 |
+
68,
|
| 223 |
+
333,
|
| 224 |
+
85
|
| 225 |
+
],
|
| 226 |
+
"page_idx": 2
|
| 227 |
+
},
|
| 228 |
+
{
|
| 229 |
+
"type": "text",
|
| 230 |
+
"text": "Rooms are the bases of our study. For modeling a room, we extract the shape of the space representation which is composed by a box with three measures: length $(l \\in \\mathbb{R})$ , width $(w \\in \\mathbb{R})$ , height $(h \\in \\mathbb{R})$ . Sensors are represented by $S = \\{\\mathrm{S}_1, \\dots, \\mathrm{S}_M\\}$ , where $M$ is the number of sensors. Sensors $\\mathrm{S}_i (\\mathrm{i} \\in \\{1, \\dots, M\\})$ are placed on the space on a layer $\\mathbf{L} \\in \\mathbb{N}$ and have a location represented by: $\\{\\mathbf{X}_i, \\mathbf{Y}_i, \\mathbf{L}_j\\}$ with $\\mathbf{X}_i \\in \\mathbb{R}$ , $\\mathbf{Y}_i \\in \\mathbb{R}$ and $j$ is the layer used. For modeling the space inside a room, we use a particle system instead of 2D map representations which have some lacks. $^{14}$ Actually 2D map does not allow having a real visualization of space. A particle visualization gives a better efficiency for modeling space. We use a large number of particles to represent the entire space. $\\mathbf{N} \\in \\mathbb{N}$ represents the number of particles in the room. It can be calculated using:",
|
| 231 |
+
"bbox": [
|
| 232 |
+
138,
|
| 233 |
+
97,
|
| 234 |
+
844,
|
| 235 |
+
270
|
| 236 |
+
],
|
| 237 |
+
"page_idx": 2
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"type": "equation",
|
| 241 |
+
"text": "\n$$\nN = \\frac {\\left(\\left(l + 1\\right) \\times (h + 1) \\times (w + 1)\\right)}{\\delta^ {3}} \\tag {1}\n$$\n",
|
| 242 |
+
"text_format": "latex",
|
| 243 |
+
"bbox": [
|
| 244 |
+
179,
|
| 245 |
+
277,
|
| 246 |
+
524,
|
| 247 |
+
316
|
| 248 |
+
],
|
| 249 |
+
"page_idx": 2
|
| 250 |
+
},
|
| 251 |
+
{
|
| 252 |
+
"type": "text",
|
| 253 |
+
"text": "where $\\delta \\in \\mathbb{R}$ is the space between particles. The particle grid is regular. In this model, three layers of temperature sensors compose rooms. They are defined according to their real locations in the data center. Figure ?? presents the different layers of sensors in the data center.",
|
| 254 |
+
"bbox": [
|
| 255 |
+
138,
|
| 256 |
+
329,
|
| 257 |
+
841,
|
| 258 |
+
395
|
| 259 |
+
],
|
| 260 |
+
"page_idx": 2
|
| 261 |
+
},
|
| 262 |
+
{
|
| 263 |
+
"type": "text",
|
| 264 |
+
"text": "Particles carry information, and flow motion can be simulated if needed by changing the value of particles and the computational cost is inferior.",
|
| 265 |
+
"bbox": [
|
| 266 |
+
138,
|
| 267 |
+
398,
|
| 268 |
+
836,
|
| 269 |
+
433
|
| 270 |
+
],
|
| 271 |
+
"page_idx": 2
|
| 272 |
+
},
|
| 273 |
+
{
|
| 274 |
+
"type": "text",
|
| 275 |
+
"text": "3.2 Segmentation algorithms",
|
| 276 |
+
"text_level": 1,
|
| 277 |
+
"bbox": [
|
| 278 |
+
140,
|
| 279 |
+
448,
|
| 280 |
+
418,
|
| 281 |
+
467
|
| 282 |
+
],
|
| 283 |
+
"page_idx": 2
|
| 284 |
+
},
|
| 285 |
+
{
|
| 286 |
+
"type": "text",
|
| 287 |
+
"text": "In our solution, each sensors has an influence on surrounding particules. To calculate the set of particles in the sensor range, we use two methods: Voronoi cells extraction and Delaunay triangulation.",
|
| 288 |
+
"bbox": [
|
| 289 |
+
138,
|
| 290 |
+
477,
|
| 291 |
+
834,
|
| 292 |
+
527
|
| 293 |
+
],
|
| 294 |
+
"page_idx": 2
|
| 295 |
+
},
|
| 296 |
+
{
|
| 297 |
+
"type": "text",
|
| 298 |
+
"text": "Voronoi cells is a method to extract a partition of space $^{15}$ . This method is available for $\\phi$ dimensions where $\\phi \\in [1, +\\infty]$ , but most of implementations are done in 2D. Tools for extracting 3D Voronoi diagrams exist: Voro++ and QHull but particles are discrete and these solutions are not suitable because they extract Voronoi diagram in a continuous way. Then we designed our own method based on sphere expansion. We search nearest sensors for each particle. This part allows to weight particles outside the sensors mesh. A second method to weight the interior of the sensors mesh is used. We extract the mesh tetrahedron of sensors using the Delaunay triangulation implemented in QHull. This method was used to analyze the location of particle. We compute the exact location using ray tracing on the soup of tetrahedron. First, we search the nearest particles inside the hull of each tetrahedron. We extract the normal of each face of tetrahedron and we apply these normals on each particle. If the ray cuts three faces or more, the particle is inside the tetrahedron. This method is cost expensive and done in preprocessing. Moreover, particles are static and position didn't need to be update.",
|
| 299 |
+
"bbox": [
|
| 300 |
+
138,
|
| 301 |
+
527,
|
| 302 |
+
854,
|
| 303 |
+
768
|
| 304 |
+
],
|
| 305 |
+
"page_idx": 2
|
| 306 |
+
},
|
| 307 |
+
{
|
| 308 |
+
"type": "text",
|
| 309 |
+
"text": "3.3 Client server paradigm",
|
| 310 |
+
"text_level": 1,
|
| 311 |
+
"bbox": [
|
| 312 |
+
140,
|
| 313 |
+
782,
|
| 314 |
+
398,
|
| 315 |
+
801
|
| 316 |
+
],
|
| 317 |
+
"page_idx": 2
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"type": "text",
|
| 321 |
+
"text": "To improve computation, a client server paradigm is used. We define a low cost communication protocol to transfer data from a server to a client. Server computes the modification of particles and the client displays the results. This protocol works in five steps. These steps are: sending header, sending sensor data, sending particle data, sending footer and receiving acknowledgment/language command from client. At each step, the server waits the acknowledgment from the client. We develop two ways to send data. The first sends the entire point cloud (sensors and particles). The biggest problem of this method is the",
|
| 322 |
+
"bbox": [
|
| 323 |
+
138,
|
| 324 |
+
811,
|
| 325 |
+
849,
|
| 326 |
+
931
|
| 327 |
+
],
|
| 328 |
+
"page_idx": 2
|
| 329 |
+
},
|
| 330 |
+
{
|
| 331 |
+
"type": "page_number",
|
| 332 |
+
"text": "3/10",
|
| 333 |
+
"bbox": [
|
| 334 |
+
480,
|
| 335 |
+
969,
|
| 336 |
+
517,
|
| 337 |
+
979
|
| 338 |
+
],
|
| 339 |
+
"page_idx": 2
|
| 340 |
+
},
|
| 341 |
+
{
|
| 342 |
+
"type": "text",
|
| 343 |
+
"text": "transmission of data. Sensors are sent with their coordinates and their value. We encode these data in bit words. For the particles data, the same method was used. The footer was sent for closing the communication. The second method is used to reduce efficiently the communication cost. We only send modified sensors and particles. The id and the new value is sent instead of coordinates. The last step is the command sent by the client. It allows the user to interact with the server. We use it to modify the camera viewpoint.",
|
| 344 |
+
"bbox": [
|
| 345 |
+
138,
|
| 346 |
+
65,
|
| 347 |
+
830,
|
| 348 |
+
167
|
| 349 |
+
],
|
| 350 |
+
"page_idx": 3
|
| 351 |
+
},
|
| 352 |
+
{
|
| 353 |
+
"type": "text",
|
| 354 |
+
"text": "3.4 Level of detail for particles",
|
| 355 |
+
"text_level": 1,
|
| 356 |
+
"bbox": [
|
| 357 |
+
142,
|
| 358 |
+
183,
|
| 359 |
+
434,
|
| 360 |
+
200
|
| 361 |
+
],
|
| 362 |
+
"page_idx": 3
|
| 363 |
+
},
|
| 364 |
+
{
|
| 365 |
+
"type": "text",
|
| 366 |
+
"text": "Level of detail (LOD) is one of the most important methods in computer graphics. It allows to solve rendering problems or performance problems. This method consists by producing several resolution of a 3D object. In our works, we use some features to define the object resolution: hardware and viewpoint. Hardware and viewpoint do not need the same data structure and we need to recompute it for each modification of the viewpoint or when hardware changes. LOD was defined by two problems statement. The first one uses a sample of original points, the second one uses a new point data set. In this part, we define six methods to produce LOD. The four first methods are for the client, the other are for the server.",
|
| 367 |
+
"bbox": [
|
| 368 |
+
138,
|
| 369 |
+
210,
|
| 370 |
+
848,
|
| 371 |
+
363
|
| 372 |
+
],
|
| 373 |
+
"page_idx": 3
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"type": "text",
|
| 377 |
+
"text": "Problems statement:",
|
| 378 |
+
"text_level": 1,
|
| 379 |
+
"bbox": [
|
| 380 |
+
140,
|
| 381 |
+
378,
|
| 382 |
+
307,
|
| 383 |
+
392
|
| 384 |
+
],
|
| 385 |
+
"page_idx": 3
|
| 386 |
+
},
|
| 387 |
+
{
|
| 388 |
+
"type": "text",
|
| 389 |
+
"text": "For this two approaches, we have a set $\\omega$ of Vertices $V$ , $V = \\{V_1, \\ldots, V_\\omega\\}$ . Each vertex is defined in $\\mathbb{R}^3$ . Simplify a mesh using a sample vertex means $\\omega > \\omega 2$ , where $\\omega 2$ is the size of the second data set. For approach 1, we obtain a new object $\\mathrm{V}2 = \\{\\mathrm{V}2_1, \\ldots, \\mathrm{V}2_\\omega\\}$ with fewer points than V but V 2 is a subset of V. For approach 2, we obtain a new object $\\mathrm{V}3 = \\{\\mathrm{V}3_1, \\ldots, \\mathrm{V}3_\\omega\\}$ with fewer points than V but each point in V 3 is a new vertex.",
|
| 390 |
+
"bbox": [
|
| 391 |
+
138,
|
| 392 |
+
393,
|
| 393 |
+
854,
|
| 394 |
+
481
|
| 395 |
+
],
|
| 396 |
+
"page_idx": 3
|
| 397 |
+
},
|
| 398 |
+
{
|
| 399 |
+
"type": "text",
|
| 400 |
+
"text": "In Section 2 we have presented methods to produce simplification. A few were designed for volumetric simplification. In this section, we propose several methods to produce different volumetric simplifications on our client. We develop four approaches to simplify 3D objects: clustering, neighbor simplification and two approaches based on server. Clustering method was based on He et al. $^{9}$ works, it consists of clustering particles using a 3D grid. Cells sizes of grid are set depending to the viewpoint of the camera. Clusters were being weight with the average of the different values of particles. The position is the barycenter of these particles. Figures 1(a)-1(e) give some examples of simplification using clustering solution. Figure 1(a) present the original point of cloud mesh. Figure",
|
| 401 |
+
"bbox": [
|
| 402 |
+
138,
|
| 403 |
+
493,
|
| 404 |
+
848,
|
| 405 |
+
647
|
| 406 |
+
],
|
| 407 |
+
"page_idx": 3
|
| 408 |
+
},
|
| 409 |
+
{
|
| 410 |
+
"type": "text",
|
| 411 |
+
"text": "1(b) and 1(d) give two different methods for clustering. And finally, Figure 1(c) and 1(e) give the results of clustering methods.",
|
| 412 |
+
"bbox": [
|
| 413 |
+
138,
|
| 414 |
+
657,
|
| 415 |
+
849,
|
| 416 |
+
691
|
| 417 |
+
],
|
| 418 |
+
"page_idx": 3
|
| 419 |
+
},
|
| 420 |
+
{
|
| 421 |
+
"type": "image",
|
| 422 |
+
"img_path": "images/ae6cbd294176dc493b50c73240c504ea8c9fc09c9e3151fba6f0be709b525025.jpg",
|
| 423 |
+
"image_caption": [
|
| 424 |
+
"Figure 1. Clustering method for simplification point cloud."
|
| 425 |
+
],
|
| 426 |
+
"image_footnote": [
|
| 427 |
+
"The second solution used is based on neighborhood extraction. Before runtime, we extract all neighbors of a particle. We measure the distance between each particle. Some optimization can help to decrease complexity: we can estimate easily in our structure which particle is closer to another one (using the fact that particle grid is regular). After this,"
|
| 428 |
+
],
|
| 429 |
+
"bbox": [
|
| 430 |
+
148,
|
| 431 |
+
696,
|
| 432 |
+
849,
|
| 433 |
+
821
|
| 434 |
+
],
|
| 435 |
+
"page_idx": 3
|
| 436 |
+
},
|
| 437 |
+
{
|
| 438 |
+
"type": "page_number",
|
| 439 |
+
"text": "4/10",
|
| 440 |
+
"bbox": [
|
| 441 |
+
480,
|
| 442 |
+
969,
|
| 443 |
+
517,
|
| 444 |
+
979
|
| 445 |
+
],
|
| 446 |
+
"page_idx": 3
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"type": "text",
|
| 450 |
+
"text": "we extract the main value of particles. We explore each neighbor of particles and we keep the most important. In some cases, the most important can be the high values, in other the low values and in other both of them. This solution is able to produce a low resolution model with the most important information structure. Several low resolution models are created by exploring deeper in neighborhood. Figures 2(a)-2(c) illustrate a neighbor, and two simplifications of this mesh.",
|
| 451 |
+
"bbox": [
|
| 452 |
+
140,
|
| 453 |
+
65,
|
| 454 |
+
844,
|
| 455 |
+
167
|
| 456 |
+
],
|
| 457 |
+
"page_idx": 4
|
| 458 |
+
},
|
| 459 |
+
{
|
| 460 |
+
"type": "image",
|
| 461 |
+
"img_path": "images/23944f9963b290146f2ac445e8970545bcf00b2949e999067a13468653d749cc.jpg",
|
| 462 |
+
"image_caption": [
|
| 463 |
+
"(a) Neighborhood cloud."
|
| 464 |
+
],
|
| 465 |
+
"image_footnote": [],
|
| 466 |
+
"bbox": [
|
| 467 |
+
148,
|
| 468 |
+
172,
|
| 469 |
+
379,
|
| 470 |
+
339
|
| 471 |
+
],
|
| 472 |
+
"page_idx": 4
|
| 473 |
+
},
|
| 474 |
+
{
|
| 475 |
+
"type": "image",
|
| 476 |
+
"img_path": "images/0e41ba0aeddc8af5f2b6664c64abea63f9b32bfd4692c38f916d07f501d537e5.jpg",
|
| 477 |
+
"image_caption": [
|
| 478 |
+
"(b) Simplification neighborhood of 1."
|
| 479 |
+
],
|
| 480 |
+
"image_footnote": [],
|
| 481 |
+
"bbox": [
|
| 482 |
+
383,
|
| 483 |
+
172,
|
| 484 |
+
612,
|
| 485 |
+
337
|
| 486 |
+
],
|
| 487 |
+
"page_idx": 4
|
| 488 |
+
},
|
| 489 |
+
{
|
| 490 |
+
"type": "image",
|
| 491 |
+
"img_path": "images/c01c12cfcf72e187db35ff49ad4d5edb57331b8d93efa4c1f20941ebf580a3d7.jpg",
|
| 492 |
+
"image_caption": [
|
| 493 |
+
"(c) Simplification neighborhood of 2.",
|
| 494 |
+
"Figure 2. Neighbor method for simplification."
|
| 495 |
+
],
|
| 496 |
+
"image_footnote": [],
|
| 497 |
+
"bbox": [
|
| 498 |
+
617,
|
| 499 |
+
172,
|
| 500 |
+
846,
|
| 501 |
+
338
|
| 502 |
+
],
|
| 503 |
+
"page_idx": 4
|
| 504 |
+
},
|
| 505 |
+
{
|
| 506 |
+
"type": "text",
|
| 507 |
+
"text": "Other methods were based on server instead of client. Client sent via TCP connection his viewpoint. The server recomputes the particles structure and recreates the entire structure. With this solution, it is possible to produce a point cloud resolution depending on hardware. Figure 3(a) presents particles rendering with a distance of 2 from the camera. Figure 3(b) is the decimation produced with a distance of 3 and Figure 3(c) is a distance of 1.",
|
| 508 |
+
"bbox": [
|
| 509 |
+
138,
|
| 510 |
+
409,
|
| 511 |
+
852,
|
| 512 |
+
492
|
| 513 |
+
],
|
| 514 |
+
"page_idx": 4
|
| 515 |
+
},
|
| 516 |
+
{
|
| 517 |
+
"type": "text",
|
| 518 |
+
"text": "Another method was based on Voronoi diffusion of temperature. The bandwidth for transmitting data is limited. We developed Voronoi temperature diffusion to solve this communication. In this approach, we update data using sphere expansion. Each time, we update particles depending on their distance from sensors. The more particles are distant from sensors the later they will be refreshed. This method sends only modified particles. The bandwidth is saved and the visualization gives a flow effect. Figure 4(a) represents values at time 0. At time 1, values of sensors change, 4(b). After time 2, we update a first range of particles 4(c) and finally the second range 4(d).",
|
| 519 |
+
"bbox": [
|
| 520 |
+
138,
|
| 521 |
+
506,
|
| 522 |
+
856,
|
| 523 |
+
642
|
| 524 |
+
],
|
| 525 |
+
"page_idx": 4
|
| 526 |
+
},
|
| 527 |
+
{
|
| 528 |
+
"type": "image",
|
| 529 |
+
"img_path": "images/7eac2440a2fedd66d943c82b390c3a9df5d95d019d523a29c7a3e2f9c7218928.jpg",
|
| 530 |
+
"image_caption": [
|
| 531 |
+
"(a) Particles server $(\\mathrm{D} = 2)$",
|
| 532 |
+
"Figure 3. Particle simplification using server and distance."
|
| 533 |
+
],
|
| 534 |
+
"image_footnote": [],
|
| 535 |
+
"bbox": [
|
| 536 |
+
156,
|
| 537 |
+
650,
|
| 538 |
+
380,
|
| 539 |
+
815
|
| 540 |
+
],
|
| 541 |
+
"page_idx": 4
|
| 542 |
+
},
|
| 543 |
+
{
|
| 544 |
+
"type": "image",
|
| 545 |
+
"img_path": "images/c4777a0507eee1c1beaa13c04e351827098bc588e6ced9d8de6cff01430e3df5.jpg",
|
| 546 |
+
"image_caption": [
|
| 547 |
+
"(b) Particles produce server $(\\mathrm{D} = 3)$"
|
| 548 |
+
],
|
| 549 |
+
"image_footnote": [],
|
| 550 |
+
"bbox": [
|
| 551 |
+
388,
|
| 552 |
+
650,
|
| 553 |
+
612,
|
| 554 |
+
815
|
| 555 |
+
],
|
| 556 |
+
"page_idx": 4
|
| 557 |
+
},
|
| 558 |
+
{
|
| 559 |
+
"type": "image",
|
| 560 |
+
"img_path": "images/5dabdfa6b0129921b2abb27785be9608573a25f26e4646d20c6b92250fad1414.jpg",
|
| 561 |
+
"image_caption": [
|
| 562 |
+
"(c) Particles produce by server $(\\mathrm{D} = 1)$"
|
| 563 |
+
],
|
| 564 |
+
"image_footnote": [],
|
| 565 |
+
"bbox": [
|
| 566 |
+
620,
|
| 567 |
+
650,
|
| 568 |
+
844,
|
| 569 |
+
815
|
| 570 |
+
],
|
| 571 |
+
"page_idx": 4
|
| 572 |
+
},
|
| 573 |
+
{
|
| 574 |
+
"type": "page_number",
|
| 575 |
+
"text": "5/10",
|
| 576 |
+
"bbox": [
|
| 577 |
+
482,
|
| 578 |
+
969,
|
| 579 |
+
514,
|
| 580 |
+
979
|
| 581 |
+
],
|
| 582 |
+
"page_idx": 4
|
| 583 |
+
},
|
| 584 |
+
{
|
| 585 |
+
"type": "image",
|
| 586 |
+
"img_path": "images/e340d9807d4ed601eee3bd74351618a9adfda026a8dbd720f9d2aa40857dad26.jpg",
|
| 587 |
+
"image_caption": [
|
| 588 |
+
"(a) Particles and sensors (T = 0)."
|
| 589 |
+
],
|
| 590 |
+
"image_footnote": [],
|
| 591 |
+
"bbox": [
|
| 592 |
+
150,
|
| 593 |
+
66,
|
| 594 |
+
321,
|
| 595 |
+
193
|
| 596 |
+
],
|
| 597 |
+
"page_idx": 5
|
| 598 |
+
},
|
| 599 |
+
{
|
| 600 |
+
"type": "image",
|
| 601 |
+
"img_path": "images/92c09c0b767ea453098777786fb815e8c2f833a8abc790807397f42e9a6f6887.jpg",
|
| 602 |
+
"image_caption": [
|
| 603 |
+
"(b) Sensors update $(\\mathrm{T} = 1)$ ."
|
| 604 |
+
],
|
| 605 |
+
"image_footnote": [],
|
| 606 |
+
"bbox": [
|
| 607 |
+
328,
|
| 608 |
+
66,
|
| 609 |
+
496,
|
| 610 |
+
194
|
| 611 |
+
],
|
| 612 |
+
"page_idx": 5
|
| 613 |
+
},
|
| 614 |
+
{
|
| 615 |
+
"type": "image",
|
| 616 |
+
"img_path": "images/416dbc943d118d6834e80c8c5ad759b34f940700b96fd0b87bb23c082f10cf10.jpg",
|
| 617 |
+
"image_caption": [
|
| 618 |
+
"(c) First range $(\\mathrm{T} = 2)$ .",
|
| 619 |
+
"Figure 4. Simplification using bandwidth size."
|
| 620 |
+
],
|
| 621 |
+
"image_footnote": [],
|
| 622 |
+
"bbox": [
|
| 623 |
+
503,
|
| 624 |
+
68,
|
| 625 |
+
669,
|
| 626 |
+
194
|
| 627 |
+
],
|
| 628 |
+
"page_idx": 5
|
| 629 |
+
},
|
| 630 |
+
{
|
| 631 |
+
"type": "image",
|
| 632 |
+
"img_path": "images/a79e8f2104ab8c32df4137808fc6b4ce9b7465bfa15c2080bd3aabb2c58986d0.jpg",
|
| 633 |
+
"image_caption": [
|
| 634 |
+
"(d) Second range $(\\mathrm{T} = 3)$ ."
|
| 635 |
+
],
|
| 636 |
+
"image_footnote": [],
|
| 637 |
+
"bbox": [
|
| 638 |
+
674,
|
| 639 |
+
68,
|
| 640 |
+
848,
|
| 641 |
+
194
|
| 642 |
+
],
|
| 643 |
+
"page_idx": 5
|
| 644 |
+
},
|
| 645 |
+
{
|
| 646 |
+
"type": "text",
|
| 647 |
+
"text": "4. EXPERIMENTAL RESULTS",
|
| 648 |
+
"text_level": 1,
|
| 649 |
+
"bbox": [
|
| 650 |
+
140,
|
| 651 |
+
287,
|
| 652 |
+
490,
|
| 653 |
+
306
|
| 654 |
+
],
|
| 655 |
+
"page_idx": 5
|
| 656 |
+
},
|
| 657 |
+
{
|
| 658 |
+
"type": "text",
|
| 659 |
+
"text": "The data are extracted from two rooms of the IBM data center. Firstly, we present our method for rendering the room, and later we present our results using Level Of Detail methods.",
|
| 660 |
+
"bbox": [
|
| 661 |
+
138,
|
| 662 |
+
321,
|
| 663 |
+
805,
|
| 664 |
+
371
|
| 665 |
+
],
|
| 666 |
+
"page_idx": 5
|
| 667 |
+
},
|
| 668 |
+
{
|
| 669 |
+
"type": "text",
|
| 670 |
+
"text": "4.1 Data visualization",
|
| 671 |
+
"text_level": 1,
|
| 672 |
+
"bbox": [
|
| 673 |
+
140,
|
| 674 |
+
388,
|
| 675 |
+
346,
|
| 676 |
+
406
|
| 677 |
+
],
|
| 678 |
+
"page_idx": 5
|
| 679 |
+
},
|
| 680 |
+
{
|
| 681 |
+
"type": "text",
|
| 682 |
+
"text": "We want to visualize and manage the consumption of a data center. For the visualization, we want to use an IFC viewer. But the IFC model for GDC is not available yet. Data center extraction of the room space is for the moment done by hand. The room is empty and was represent by a simple shape a box with 4 meters length, 3 meters width and 2.5 meters height. We use point cloud visualization based on particle paradigm. We use the two rooms of the data center and we put the same number of particles (30000) and 35 sensors distributed on three layers at 1 meter; 2 meter and on the ground. We define high and low temperature regarding the real sensors value. Figure 5(a) presents temperature color scale, Figure 5(b) and Figure 5(c) present data center sensors.",
|
| 683 |
+
"bbox": [
|
| 684 |
+
138,
|
| 685 |
+
417,
|
| 686 |
+
848,
|
| 687 |
+
571
|
| 688 |
+
],
|
| 689 |
+
"page_idx": 5
|
| 690 |
+
},
|
| 691 |
+
{
|
| 692 |
+
"type": "text",
|
| 693 |
+
"text": "The next step is to interpolate data from sensors. For this, we extract the sensor mesh. We use QHULL to produce a soup of tetrahedrons. Particles need to be located. We can determine which tetrahedron is the nearest, we extract the box hull of tetrahedron and we apply for each particle the norms of each tetrahedron face. If these rays cut three or more faces, then particle is inside the tetrahedron. With this method, we can determine exactly the location of each particles regarding to the tetrahedrons, a weight is given to them easily. It was used to apply a coefficient to the value of each vertex of tetrahedron. For the outside particles, another solution was used: Voronoi cells. This method is based on a discrete extraction of Voronoi cells. We use our own method because other method like Voro ++ or QHull extract Voronoi diagram in a continuous way.",
|
| 694 |
+
"bbox": [
|
| 695 |
+
138,
|
| 696 |
+
584,
|
| 697 |
+
852,
|
| 698 |
+
755
|
| 699 |
+
],
|
| 700 |
+
"page_idx": 5
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "image",
|
| 704 |
+
"img_path": "images/7c729f1706460b0bf0adde7a8dbd071f9ae77f611ef3f85b3d951fb62795016c.jpg",
|
| 705 |
+
"image_caption": [],
|
| 706 |
+
"image_footnote": [],
|
| 707 |
+
"bbox": [
|
| 708 |
+
151,
|
| 709 |
+
878,
|
| 710 |
+
367,
|
| 711 |
+
922
|
| 712 |
+
],
|
| 713 |
+
"page_idx": 5
|
| 714 |
+
},
|
| 715 |
+
{
|
| 716 |
+
"type": "image",
|
| 717 |
+
"img_path": "images/8c16ad7a59f34e5974a4322466cec0149f8905a1a1b1cfadff910a3fec50d004.jpg",
|
| 718 |
+
"image_caption": [
|
| 719 |
+
"(b) Room one."
|
| 720 |
+
],
|
| 721 |
+
"image_footnote": [],
|
| 722 |
+
"bbox": [
|
| 723 |
+
398,
|
| 724 |
+
763,
|
| 725 |
+
606,
|
| 726 |
+
898
|
| 727 |
+
],
|
| 728 |
+
"page_idx": 5
|
| 729 |
+
},
|
| 730 |
+
{
|
| 731 |
+
"type": "image",
|
| 732 |
+
"img_path": "images/2eb0e7b79700cbbd72a8a244992163f567dbbd68f9d3db6bad3e5808cda4dabb.jpg",
|
| 733 |
+
"image_caption": [
|
| 734 |
+
"(c) Room two."
|
| 735 |
+
],
|
| 736 |
+
"image_footnote": [],
|
| 737 |
+
"bbox": [
|
| 738 |
+
643,
|
| 739 |
+
765,
|
| 740 |
+
849,
|
| 741 |
+
898
|
| 742 |
+
],
|
| 743 |
+
"page_idx": 5
|
| 744 |
+
},
|
| 745 |
+
{
|
| 746 |
+
"type": "page_number",
|
| 747 |
+
"text": "6/10",
|
| 748 |
+
"bbox": [
|
| 749 |
+
482,
|
| 750 |
+
969,
|
| 751 |
+
514,
|
| 752 |
+
979
|
| 753 |
+
],
|
| 754 |
+
"page_idx": 5
|
| 755 |
+
},
|
| 756 |
+
{
|
| 757 |
+
"type": "text",
|
| 758 |
+
"text": "4.2 Level of details",
|
| 759 |
+
"text_level": 1,
|
| 760 |
+
"bbox": [
|
| 761 |
+
142,
|
| 762 |
+
98,
|
| 763 |
+
325,
|
| 764 |
+
116
|
| 765 |
+
],
|
| 766 |
+
"page_idx": 6
|
| 767 |
+
},
|
| 768 |
+
{
|
| 769 |
+
"type": "text",
|
| 770 |
+
"text": "In the earlier days of this project, first solution proposed gives a low frame rates, about 15 FPS (Frame Per Second): visualization was not in real-time (real-time is about 24 FPS). For solving this problem, we define a client server paradigm. This solution allows to produce a real-time rendering on the client. Figure ?? gives an example of LOD for particles. We use Openscenegraph $^{20}$ as a 3D engine. It owns several features useful in LOD. A special object is defined to manage multi-resolution model. It calculates the distance of the object from the camera. For our experimentation we use five resolutions of mesh. The first mesh was the original mesh, it is set at 0 to 500. The next mesh was set at 500 to 1000, the next at 1000 to 1500 and the other at 1500 to 2000. These three meshes were constructed by specific LOD methods: clustering and significant vertices. Clustering defines a 3D grid inside the room. The size of each cell depends on the viewpoint location. The size of the cluster depends on the visibility of the clustered particles. First results are given Figure 6(a) and 6(b). Value of cluster is an average of clustered value. The number of points of the final mesh depends on the grid size. Table 1 shows the results at several distances.",
|
| 771 |
+
"bbox": [
|
| 772 |
+
138,
|
| 773 |
+
126,
|
| 774 |
+
857,
|
| 775 |
+
367
|
| 776 |
+
],
|
| 777 |
+
"page_idx": 6
|
| 778 |
+
},
|
| 779 |
+
{
|
| 780 |
+
"type": "table",
|
| 781 |
+
"img_path": "images/389ef0f63a90171c14db5cd9a925792e376222c8c56a5801cacb3dcc99360c96.jpg",
|
| 782 |
+
"table_caption": [],
|
| 783 |
+
"table_footnote": [],
|
| 784 |
+
"table_body": "<table><tr><td></td><td>D = 0 to 500</td><td>D = 500 to 1000</td><td>D = 1000 to 1500</td><td>D = 1500 to 2000</td></tr><tr><td>C = X</td><td>30000</td><td>3900</td><td>240</td><td>36</td></tr></table>",
|
| 785 |
+
"bbox": [
|
| 786 |
+
143,
|
| 787 |
+
375,
|
| 788 |
+
854,
|
| 789 |
+
450
|
| 790 |
+
],
|
| 791 |
+
"page_idx": 6
|
| 792 |
+
},
|
| 793 |
+
{
|
| 794 |
+
"type": "image",
|
| 795 |
+
"img_path": "images/9fbe1142edda7eab13f0abb9592b84e63d1611257e7426d7d569a68b9df82b38.jpg",
|
| 796 |
+
"image_caption": [
|
| 797 |
+
"Figure 5. Data use to model the system.",
|
| 798 |
+
"(a) $\\mathrm{D} = 500$ to 1000.",
|
| 799 |
+
"Figure 6. Clustering visualization algorithms."
|
| 800 |
+
],
|
| 801 |
+
"image_footnote": [],
|
| 802 |
+
"bbox": [
|
| 803 |
+
158,
|
| 804 |
+
508,
|
| 805 |
+
500,
|
| 806 |
+
705
|
| 807 |
+
],
|
| 808 |
+
"page_idx": 6
|
| 809 |
+
},
|
| 810 |
+
{
|
| 811 |
+
"type": "image",
|
| 812 |
+
"img_path": "images/69ae709df8988787ff641b5b6eb5a608ca0636bba7110a3a4735064168d117da.jpg",
|
| 813 |
+
"image_caption": [
|
| 814 |
+
"Table 1. Results of clustering simplification.",
|
| 815 |
+
"(b) $D = 1000$ to 1500."
|
| 816 |
+
],
|
| 817 |
+
"image_footnote": [],
|
| 818 |
+
"bbox": [
|
| 819 |
+
504,
|
| 820 |
+
508,
|
| 821 |
+
846,
|
| 822 |
+
705
|
| 823 |
+
],
|
| 824 |
+
"page_idx": 6
|
| 825 |
+
},
|
| 826 |
+
{
|
| 827 |
+
"type": "text",
|
| 828 |
+
"text": "Significant points method extracts the neighbors for each particle. We extract the highest and lowest temperatures, by exploring the neighborhood of a particle, in order to have significant vertices of the model. For the first step of simplified model we explore neighbor. For the second model, we explore neighbor and neighbor of neighbor, etc. This solution simplifies drastically the model. First results are given Figure ??-??. Table 2 shows the number of vertices at several distance.",
|
| 829 |
+
"bbox": [
|
| 830 |
+
138,
|
| 831 |
+
756,
|
| 832 |
+
843,
|
| 833 |
+
859
|
| 834 |
+
],
|
| 835 |
+
"page_idx": 6
|
| 836 |
+
},
|
| 837 |
+
{
|
| 838 |
+
"type": "page_number",
|
| 839 |
+
"text": "7/10",
|
| 840 |
+
"bbox": [
|
| 841 |
+
482,
|
| 842 |
+
969,
|
| 843 |
+
516,
|
| 844 |
+
979
|
| 845 |
+
],
|
| 846 |
+
"page_idx": 6
|
| 847 |
+
},
|
| 848 |
+
{
|
| 849 |
+
"type": "table",
|
| 850 |
+
"img_path": "images/3cd8a951d8771e165427c2faca0d2508e4c8cef94fb244dc3a04f2ca031996a3.jpg",
|
| 851 |
+
"table_caption": [],
|
| 852 |
+
"table_footnote": [],
|
| 853 |
+
"table_body": "<table><tr><td></td><td>D = 0 to 500</td><td>D = 500 to 1000</td><td>D = 1000 to 1500</td><td>D = 1500 to 2000</td></tr><tr><td>C = X</td><td>30000</td><td>22950</td><td>4554</td><td>3524</td></tr></table>",
|
| 854 |
+
"bbox": [
|
| 855 |
+
143,
|
| 856 |
+
61,
|
| 857 |
+
854,
|
| 858 |
+
137
|
| 859 |
+
],
|
| 860 |
+
"page_idx": 7
|
| 861 |
+
},
|
| 862 |
+
{
|
| 863 |
+
"type": "image",
|
| 864 |
+
"img_path": "images/4050e8f862ac40be7bc6d5c239997192325936e244911b8d69fe60a4ab8810b8.jpg",
|
| 865 |
+
"image_caption": [
|
| 866 |
+
"(a) Neighborhood 1.",
|
| 867 |
+
"Figure 7. Clustering visualization algorithms using neighbor."
|
| 868 |
+
],
|
| 869 |
+
"image_footnote": [],
|
| 870 |
+
"bbox": [
|
| 871 |
+
153,
|
| 872 |
+
195,
|
| 873 |
+
496,
|
| 874 |
+
402
|
| 875 |
+
],
|
| 876 |
+
"page_idx": 7
|
| 877 |
+
},
|
| 878 |
+
{
|
| 879 |
+
"type": "image",
|
| 880 |
+
"img_path": "images/dafb2130c8265a91a7faed53db3de1181cd524770896acda68d5a953df004f87.jpg",
|
| 881 |
+
"image_caption": [
|
| 882 |
+
"(b) Neighborhood 2."
|
| 883 |
+
],
|
| 884 |
+
"image_footnote": [],
|
| 885 |
+
"bbox": [
|
| 886 |
+
500,
|
| 887 |
+
195,
|
| 888 |
+
843,
|
| 889 |
+
402
|
| 890 |
+
],
|
| 891 |
+
"page_idx": 7
|
| 892 |
+
},
|
| 893 |
+
{
|
| 894 |
+
"type": "text",
|
| 895 |
+
"text": "The first server solution receives orders from client as presented Section 3.4. We calculate the viewpoint distance and we send data according to it. A new structure is recalculated if the camera is too far from the object. After the recomputing, we send the new data. This solution allows the user to receive more or less data according to its distance to the object. Table 3 shows some different resolutions produced with this method.",
|
| 896 |
+
"bbox": [
|
| 897 |
+
138,
|
| 898 |
+
470,
|
| 899 |
+
844,
|
| 900 |
+
555
|
| 901 |
+
],
|
| 902 |
+
"page_idx": 7
|
| 903 |
+
},
|
| 904 |
+
{
|
| 905 |
+
"type": "table",
|
| 906 |
+
"img_path": "images/9434d9911bf9b1660b16b84b9672b9a68ac66eec53e61ed383a2c752f12025a2.jpg",
|
| 907 |
+
"table_caption": [
|
| 908 |
+
"Table 2. Results of neighbor simplification."
|
| 909 |
+
],
|
| 910 |
+
"table_footnote": [],
|
| 911 |
+
"table_body": "<table><tr><td></td><td>D = 0 to 500</td><td>D = 500 to 1000</td><td>D = 1000 to 1500</td><td>D = 1500 to 2000</td></tr><tr><td>C = X</td><td>120000</td><td>30000</td><td>7500</td><td>1875</td></tr></table>",
|
| 912 |
+
"bbox": [
|
| 913 |
+
143,
|
| 914 |
+
566,
|
| 915 |
+
854,
|
| 916 |
+
640
|
| 917 |
+
],
|
| 918 |
+
"page_idx": 7
|
| 919 |
+
},
|
| 920 |
+
{
|
| 921 |
+
"type": "text",
|
| 922 |
+
"text": "Table 3. Several resolution of model.",
|
| 923 |
+
"bbox": [
|
| 924 |
+
138,
|
| 925 |
+
674,
|
| 926 |
+
424,
|
| 927 |
+
688
|
| 928 |
+
],
|
| 929 |
+
"page_idx": 7
|
| 930 |
+
},
|
| 931 |
+
{
|
| 932 |
+
"type": "text",
|
| 933 |
+
"text": "Another solution is to use bandwidth latency. We send data at several times, we do not send the entire set of data but only modified particles. We send at first time the sensors data, and subsequently we send a range of data (the nearest). After few minutes, all data are sent. This solution gives good results, and simulates a thermal diffusion in the whole structure of particles. Figure 8(a)-8(c) illustrate this method.",
|
| 934 |
+
"bbox": [
|
| 935 |
+
138,
|
| 936 |
+
691,
|
| 937 |
+
852,
|
| 938 |
+
775
|
| 939 |
+
],
|
| 940 |
+
"page_idx": 7
|
| 941 |
+
},
|
| 942 |
+
{
|
| 943 |
+
"type": "image",
|
| 944 |
+
"img_path": "images/2fcee6efa3b1d3c44144e2639f3857a15c4079f838c4f9132953fd427a8b1be9.jpg",
|
| 945 |
+
"image_caption": [
|
| 946 |
+
"(a) $\\mathrm{T} = 0$"
|
| 947 |
+
],
|
| 948 |
+
"image_footnote": [],
|
| 949 |
+
"bbox": [
|
| 950 |
+
148,
|
| 951 |
+
779,
|
| 952 |
+
380,
|
| 953 |
+
912
|
| 954 |
+
],
|
| 955 |
+
"page_idx": 7
|
| 956 |
+
},
|
| 957 |
+
{
|
| 958 |
+
"type": "image",
|
| 959 |
+
"img_path": "images/b2be70bd4cc7d261ac38e8bc653a0c09c537d49d2a5612ed5c067058a98ef463.jpg",
|
| 960 |
+
"image_caption": [
|
| 961 |
+
"(b) $\\mathrm{T} = 1$"
|
| 962 |
+
],
|
| 963 |
+
"image_footnote": [],
|
| 964 |
+
"bbox": [
|
| 965 |
+
383,
|
| 966 |
+
779,
|
| 967 |
+
616,
|
| 968 |
+
912
|
| 969 |
+
],
|
| 970 |
+
"page_idx": 7
|
| 971 |
+
},
|
| 972 |
+
{
|
| 973 |
+
"type": "image",
|
| 974 |
+
"img_path": "images/0d4e22f04d3ea25761125c93fc6d70fe856bf840ef353c043f32d21c9633724e.jpg",
|
| 975 |
+
"image_caption": [
|
| 976 |
+
"(c) $\\mathrm{T} = 4$"
|
| 977 |
+
],
|
| 978 |
+
"image_footnote": [],
|
| 979 |
+
"bbox": [
|
| 980 |
+
617,
|
| 981 |
+
779,
|
| 982 |
+
849,
|
| 983 |
+
912
|
| 984 |
+
],
|
| 985 |
+
"page_idx": 7
|
| 986 |
+
},
|
| 987 |
+
{
|
| 988 |
+
"type": "page_number",
|
| 989 |
+
"text": "8/10",
|
| 990 |
+
"bbox": [
|
| 991 |
+
482,
|
| 992 |
+
969,
|
| 993 |
+
514,
|
| 994 |
+
979
|
| 995 |
+
],
|
| 996 |
+
"page_idx": 7
|
| 997 |
+
},
|
| 998 |
+
{
|
| 999 |
+
"type": "text",
|
| 1000 |
+
"text": "5. CONCLUSION",
|
| 1001 |
+
"text_level": 1,
|
| 1002 |
+
"bbox": [
|
| 1003 |
+
142,
|
| 1004 |
+
114,
|
| 1005 |
+
339,
|
| 1006 |
+
135
|
| 1007 |
+
],
|
| 1008 |
+
"page_idx": 8
|
| 1009 |
+
},
|
| 1010 |
+
{
|
| 1011 |
+
"type": "text",
|
| 1012 |
+
"text": "In this paper, we have presented a method to visualize sensors data extracted from a Green Data Center. This approach produces interpolation visualization for managing and visualizing data. This interpolation used a Delaunay triangulation and a cell extraction based on Voronoi. An unusual way of use particles helps to process data. First results present the solution proposed to visualize the inside of a GDC space. The second results proposed in this paper aim to improve the rendering.",
|
| 1013 |
+
"bbox": [
|
| 1014 |
+
140,
|
| 1015 |
+
148,
|
| 1016 |
+
849,
|
| 1017 |
+
251
|
| 1018 |
+
],
|
| 1019 |
+
"page_idx": 8
|
| 1020 |
+
},
|
| 1021 |
+
{
|
| 1022 |
+
"type": "text",
|
| 1023 |
+
"text": "For this, first step introduces a client/server protocol a second step illustrates methods to simplify the model. With these different approaches we improve the rendering time, preserving most important data are kept. In future works, we will work on data \"dressing\". We want to find a way to improve rendering of the scene using meatballs or marching cube algorithms. A main constraint of this work is real-time computation. Future work also concern to add rooms to the visualization. At present, we only visualize a single room. We want to visualize building, and complex form, by using an IFC loader.",
|
| 1024 |
+
"bbox": [
|
| 1025 |
+
140,
|
| 1026 |
+
252,
|
| 1027 |
+
852,
|
| 1028 |
+
371
|
| 1029 |
+
],
|
| 1030 |
+
"page_idx": 8
|
| 1031 |
+
},
|
| 1032 |
+
{
|
| 1033 |
+
"type": "text",
|
| 1034 |
+
"text": "ACKNOWLEDGMENTS",
|
| 1035 |
+
"text_level": 1,
|
| 1036 |
+
"bbox": [
|
| 1037 |
+
142,
|
| 1038 |
+
402,
|
| 1039 |
+
419,
|
| 1040 |
+
422
|
| 1041 |
+
],
|
| 1042 |
+
"page_idx": 8
|
| 1043 |
+
},
|
| 1044 |
+
{
|
| 1045 |
+
"type": "text",
|
| 1046 |
+
"text": "We want to thanks the PSSC (Products and Solutions Support Center) team of IBM Montpellier for having provided the necessary equipment and data need for this experimentation. And we thank the FUI (Fonds Unique Interministriel) for their financial support.",
|
| 1047 |
+
"bbox": [
|
| 1048 |
+
138,
|
| 1049 |
+
438,
|
| 1050 |
+
836,
|
| 1051 |
+
506
|
| 1052 |
+
],
|
| 1053 |
+
"page_idx": 8
|
| 1054 |
+
},
|
| 1055 |
+
{
|
| 1056 |
+
"type": "text",
|
| 1057 |
+
"text": "REFERENCES",
|
| 1058 |
+
"text_level": 1,
|
| 1059 |
+
"bbox": [
|
| 1060 |
+
142,
|
| 1061 |
+
539,
|
| 1062 |
+
310,
|
| 1063 |
+
558
|
| 1064 |
+
],
|
| 1065 |
+
"page_idx": 8
|
| 1066 |
+
},
|
| 1067 |
+
{
|
| 1068 |
+
"type": "list",
|
| 1069 |
+
"sub_type": "ref_text",
|
| 1070 |
+
"list_items": [
|
| 1071 |
+
"[1] Clark, J. H., \"Hierarchical geometric models for visible surface algorithms,\" Communications of the ACM 19(10), 547-554 (1976).",
|
| 1072 |
+
"[2] Damon, M., Kameyama, M., Knox, M., Porter, D., Yuen, D., and Sevre, E., \"Interactive visualization of 3d mantle convection,\" Visual Geosciences (2008).",
|
| 1073 |
+
"[3] Jordan, K. E., Yuen, D. A., Reuteler, D. M., Zhang, S., and Haimes, R., \"Parallel interactive visualization of 3d mantle convection,\" IEEE Comput. Sci. Eng. 3(4), 29-37 (1996).",
|
| 1074 |
+
"[4] Reeves, W. T., \"Particle systems - a technique for modeling a class of fuzzy objects,\" ACM Transactions on Graphics 2, 359-376 (1983).",
|
| 1075 |
+
"[5] Latta, L., \"Building a million particle system,\" (2004).",
|
| 1076 |
+
"[6] Kapferer, W. and Riser, T., \"Visualization needs and techniques for astrophysical simulations,\" New Journal of Physics 10(12), 125008 (15pp) (2008).",
|
| 1077 |
+
"[7] Schroeder, W. J., Zarge, J. A., and Lorensen, W. E., \"Decimation of triangle meshes,\" 65-70 (1992).",
|
| 1078 |
+
"[8] Luebke, D., \"A survey of polygonal simplification algorithms,\" (1997).",
|
| 1079 |
+
"[9] He, T., Hong, L., Kaufman, A., Varshney, A., and Wang, S., \"Voxel based object simplification,\" in [Proc. SIGGRAPH Symposium on Interactive 3D Graphics], 296-303 (1995).",
|
| 1080 |
+
"[10] Lorensen, W. E. and Cline, H. E., \"Marching cubes: A high resolution 3d surface construction algorithm,\" SIGGRAPH Comput. Graph. 21(4), 163-169 (1987).",
|
| 1081 |
+
"[11] Pauly, M., Gross, M., and Kobbelt, L. P., \"Efficient simplification of point-sampled surfaces,\" (2002).",
|
| 1082 |
+
"[12] Moenning, C., , Moenning, C., and Dodgson, N. A., \"Intrinsic point cloud simplification,\""
|
| 1083 |
+
],
|
| 1084 |
+
"bbox": [
|
| 1085 |
+
140,
|
| 1086 |
+
573,
|
| 1087 |
+
852,
|
| 1088 |
+
931
|
| 1089 |
+
],
|
| 1090 |
+
"page_idx": 8
|
| 1091 |
+
},
|
| 1092 |
+
{
|
| 1093 |
+
"type": "header",
|
| 1094 |
+
"text": "Figure 8. Bandwidth simplification.",
|
| 1095 |
+
"bbox": [
|
| 1096 |
+
140,
|
| 1097 |
+
65,
|
| 1098 |
+
413,
|
| 1099 |
+
80
|
| 1100 |
+
],
|
| 1101 |
+
"page_idx": 8
|
| 1102 |
+
},
|
| 1103 |
+
{
|
| 1104 |
+
"type": "page_number",
|
| 1105 |
+
"text": "9/10",
|
| 1106 |
+
"bbox": [
|
| 1107 |
+
480,
|
| 1108 |
+
969,
|
| 1109 |
+
514,
|
| 1110 |
+
979
|
| 1111 |
+
],
|
| 1112 |
+
"page_idx": 8
|
| 1113 |
+
},
|
| 1114 |
+
{
|
| 1115 |
+
"type": "list",
|
| 1116 |
+
"sub_type": "ref_text",
|
| 1117 |
+
"list_items": [
|
| 1118 |
+
"(2004).",
|
| 1119 |
+
"[13] Song, H. and Feng, H.-Y., \"A progressive point cloud simplification algorithm with preserved sharp edge data,\" The International Journal of Advanced Manufacturing Technology 45, 583-592 (November 2009).",
|
| 1120 |
+
"[14] Buschmann, C., Pfisterer, D., Fischer, S., Fekete, S. P., and Kröller, A., \"Spyglass: a wireless sensor network visualizer,\" SIGBED Rev. 2(1), 1-6 (2005).",
|
| 1121 |
+
"[15] Avis, D. and Bhattacharya, B., \"Algorithms for computing d-dimensional voronoi diagrams and their duals,\" 1, 159-180 (1983).",
|
| 1122 |
+
"[16] Rycroft, C. H., \"Voro++: a three-dimensional voronoi cell library in $c++$ ,\" Chaos 19 (2009). Lawrence Berkeley National Laboratory.",
|
| 1123 |
+
"[17] Barber, C. B., Dobkin, D. P., and Huhdanpaa, H., \"The quickhull algorithm for convex hulls,\" ACM Trans. Math. Softw. 22(4), 469-483 (1996).",
|
| 1124 |
+
"[18] Snyder, J. M. and Barr, A. H., \"Ray tracing complex models containing surface tessellations,\" SIGGRAPH Comput. Graph. 21(4), 119-128 (1987).",
|
| 1125 |
+
"[19] Hoppe, H., \"Progressive meshes. computer graphics,\" SIGGRAPH96 Proceedings, 99108 (1996).",
|
| 1126 |
+
"[20] Burns, D. and Osfield, R., \"Open scene graph a: Introduction, b: Examples and applications,\" 265 (2004)."
|
| 1127 |
+
],
|
| 1128 |
+
"bbox": [
|
| 1129 |
+
142,
|
| 1130 |
+
65,
|
| 1131 |
+
849,
|
| 1132 |
+
371
|
| 1133 |
+
],
|
| 1134 |
+
"page_idx": 9
|
| 1135 |
+
},
|
| 1136 |
+
{
|
| 1137 |
+
"type": "text",
|
| 1138 |
+
"text": "Further author information:",
|
| 1139 |
+
"bbox": [
|
| 1140 |
+
166,
|
| 1141 |
+
393,
|
| 1142 |
+
388,
|
| 1143 |
+
407
|
| 1144 |
+
],
|
| 1145 |
+
"page_idx": 9
|
| 1146 |
+
},
|
| 1147 |
+
{
|
| 1148 |
+
"type": "text",
|
| 1149 |
+
"text": "Lange B.: E-mail: benoit.lange@lirmm.fr",
|
| 1150 |
+
"bbox": [
|
| 1151 |
+
166,
|
| 1152 |
+
411,
|
| 1153 |
+
480,
|
| 1154 |
+
426
|
| 1155 |
+
],
|
| 1156 |
+
"page_idx": 9
|
| 1157 |
+
},
|
| 1158 |
+
{
|
| 1159 |
+
"type": "text",
|
| 1160 |
+
"text": "Rodriguez N.: E-mail: nancy.rodriguez@lirmm.fr",
|
| 1161 |
+
"bbox": [
|
| 1162 |
+
166,
|
| 1163 |
+
428,
|
| 1164 |
+
542,
|
| 1165 |
+
443
|
| 1166 |
+
],
|
| 1167 |
+
"page_idx": 9
|
| 1168 |
+
},
|
| 1169 |
+
{
|
| 1170 |
+
"type": "text",
|
| 1171 |
+
"text": "Puech W.: E-mail: william.puech@lirmm.fr",
|
| 1172 |
+
"bbox": [
|
| 1173 |
+
166,
|
| 1174 |
+
445,
|
| 1175 |
+
496,
|
| 1176 |
+
459
|
| 1177 |
+
],
|
| 1178 |
+
"page_idx": 9
|
| 1179 |
+
},
|
| 1180 |
+
{
|
| 1181 |
+
"type": "text",
|
| 1182 |
+
"text": "Rey H.: E-mail:REYHERVE@fr.ibm.com",
|
| 1183 |
+
"bbox": [
|
| 1184 |
+
166,
|
| 1185 |
+
462,
|
| 1186 |
+
467,
|
| 1187 |
+
477
|
| 1188 |
+
],
|
| 1189 |
+
"page_idx": 9
|
| 1190 |
+
},
|
| 1191 |
+
{
|
| 1192 |
+
"type": "text",
|
| 1193 |
+
"text": "Vasques X.: E-mail: xaviervasques@fr.ibm.com",
|
| 1194 |
+
"bbox": [
|
| 1195 |
+
166,
|
| 1196 |
+
479,
|
| 1197 |
+
527,
|
| 1198 |
+
494
|
| 1199 |
+
],
|
| 1200 |
+
"page_idx": 9
|
| 1201 |
+
},
|
| 1202 |
+
{
|
| 1203 |
+
"type": "page_number",
|
| 1204 |
+
"text": "10/10",
|
| 1205 |
+
"bbox": [
|
| 1206 |
+
480,
|
| 1207 |
+
969,
|
| 1208 |
+
517,
|
| 1209 |
+
979
|
| 1210 |
+
],
|
| 1211 |
+
"page_idx": 9
|
| 1212 |
+
}
|
| 1213 |
+
]
|
data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_model.json
ADDED
|
@@ -0,0 +1,1639 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "title",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.195,
|
| 7 |
+
0.113,
|
| 8 |
+
0.804,
|
| 9 |
+
0.176
|
| 10 |
+
],
|
| 11 |
+
"angle": 0,
|
| 12 |
+
"content": "A 3D particle visualization system for temperature management"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.334,
|
| 18 |
+
0.201,
|
| 19 |
+
0.663,
|
| 20 |
+
0.234
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "Lange B. \\(^{a}\\), Rodriguez N. \\(^{a}\\), Puech W. \\(^{a}\\), Rey H. \\(^{b}\\) and Vasques X. \\(^{b}\\)"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.331,
|
| 29 |
+
0.236,
|
| 30 |
+
0.667,
|
| 31 |
+
0.252
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "\\(^{a}\\) LIRMM, 141 rue ADA, Montpellier, France;"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.339,
|
| 40 |
+
0.253,
|
| 41 |
+
0.659,
|
| 42 |
+
0.269
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "<sup>b</sup> IBM, Rue de la vieille poste, Montpellier,"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.471,
|
| 51 |
+
0.271,
|
| 52 |
+
0.528,
|
| 53 |
+
0.285
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "France"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "title",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.467,
|
| 62 |
+
0.315,
|
| 63 |
+
0.532,
|
| 64 |
+
0.329
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "Abstract"
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.215,
|
| 73 |
+
0.338,
|
| 74 |
+
0.786,
|
| 75 |
+
0.514
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "This paper deals with a 3D visualization technique proposed to analyze and manage energy efficiency from a data center. Data are extracted from sensors located in the IBM Green Data Center in Montpellier France. These sensors measure different information such as hygrometry, pressure and temperature. We want to visualize in real-time the large among of data produced by these sensors. A visualization engine has been designed, based on particles system and a client server paradigm. In order to solve performance problems, a Level Of Detail solution has been developed. These methods are based on the earlier work introduced by J. Clark in \\(1976^{1}\\). In this paper we introduce a particle method used for this work and subsequently we explain different simplification methods applied to improve our solution."
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.145,
|
| 84 |
+
0.527,
|
| 85 |
+
0.735,
|
| 86 |
+
0.542
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "Keywords: 3D Visualization, Sensors, Particles, Client/Server, Level Of Details"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "title",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.147,
|
| 95 |
+
0.576,
|
| 96 |
+
0.371,
|
| 97 |
+
0.596
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "1. INTRODUCTION"
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.143,
|
| 106 |
+
0.611,
|
| 107 |
+
0.842,
|
| 108 |
+
0.866
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "In this paper, we present a method to produce a 3D visualization for analyzing and managing temperature. Data are extracted from sensors located in the IBM Green Data Center in Montpellier, which provides many different types of information like temperature, pressure or hygrometry. In our system, sensors are placed in a virtual room and the internal space is modeled using particles. The main constraint here is to produce a real-time rendering. However, latency appears du to the number of vertices. In this paper, we use a solution called LOD (Level Of Detail) to produce multi resolution 3D objects. This solution has been introduced in 1976 by J. Clark<sup>1</sup>. In this paper, J. Clark introduces the use of several mesh resolutions to simplify the 3D scene complexity. In our work, we use various simplification methods to provide interactive rendering and allows rendering the most important part of data extracted from sensors. In this paper, we describe how we create a room, and the methods used to produce different resolution visualization. In Section 2, we introduce related work on particles systems and LOD. In Section 3, we expose our solution to simplify particles system. In Section 4 we give some results and finally, in Section 5 we present our conclusions and future work."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "page_number",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.484,
|
| 117 |
+
0.971,
|
| 118 |
+
0.516,
|
| 119 |
+
0.98
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "1/10"
|
| 123 |
+
}
|
| 124 |
+
],
|
| 125 |
+
[
|
| 126 |
+
{
|
| 127 |
+
"type": "title",
|
| 128 |
+
"bbox": [
|
| 129 |
+
0.147,
|
| 130 |
+
0.07,
|
| 131 |
+
0.376,
|
| 132 |
+
0.089
|
| 133 |
+
],
|
| 134 |
+
"angle": 0,
|
| 135 |
+
"content": "2. RELATED WORK"
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"type": "text",
|
| 139 |
+
"bbox": [
|
| 140 |
+
0.145,
|
| 141 |
+
0.105,
|
| 142 |
+
0.83,
|
| 143 |
+
0.137
|
| 144 |
+
],
|
| 145 |
+
"angle": 0,
|
| 146 |
+
"content": "In this section we present several previous works concerning data visualization, particle systems and level of detail methods."
|
| 147 |
+
},
|
| 148 |
+
{
|
| 149 |
+
"type": "text",
|
| 150 |
+
"bbox": [
|
| 151 |
+
0.145,
|
| 152 |
+
0.139,
|
| 153 |
+
0.837,
|
| 154 |
+
0.223
|
| 155 |
+
],
|
| 156 |
+
"angle": 0,
|
| 157 |
+
"content": "Some previous work present solutions to visualize large data flow extracted from mantle convection. M. Damon et al. \\(^{2}\\) and K. E. Jordan et al. \\(^{3}\\) present interactive viewers for this kind of data. These data are computed by using Hight Performance Computing (HPC) and visualized on a large display. The rendering is calculated by using another HPC. The data flow is very important and a real-time 3D simulation is hard to obtain. W. Kapfer and"
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"type": "text",
|
| 161 |
+
"bbox": [
|
| 162 |
+
0.145,
|
| 163 |
+
0.235,
|
| 164 |
+
0.852,
|
| 165 |
+
0.405
|
| 166 |
+
],
|
| 167 |
+
"angle": 0,
|
| 168 |
+
"content": "T. Riser<sup>6</sup> introduce how to use particle system to visualize astronomic simulation, particles representing space objects. The number of particles is extremely important for computing motion in real-time. GPU computing is preferred to render instead of a common HPC solution. To display their data, they have developed their own 3D graphical engine. The space objects are represented by point sprite instead of sphere. Lights are used to give a spherical aspect to the point sprite. This solution allows to render more stars than spherical object method. The 3D engine provides different rendering methods to group space objects: cell simplification or extraction of isosurface. The use of GPU seems quite well for a particle solution, parallel processing allows to render large data; the astrological data seems to be well suited."
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"type": "text",
|
| 172 |
+
"bbox": [
|
| 173 |
+
0.145,
|
| 174 |
+
0.407,
|
| 175 |
+
0.852,
|
| 176 |
+
0.678
|
| 177 |
+
],
|
| 178 |
+
"angle": 0,
|
| 179 |
+
"content": "In 1976, J. Clark introduces Level Of Detail (LOD) concept<sup>1</sup>. LOD consists of several resolution meshes for using them at different distances from the camera. Firstly, designer produces these meshes. First algorithms, in 1992 Schroeder et al. developed a method by decimation for simplify the mesh<sup>7</sup>. It analyses mesh geometry and evaluates the complexity of triangles. Vertices are removed if only constraints set by the user are respected. Vertices are removed and gaps are filled using triangulation. These algorithms of simplification are not enough to simplify mesh efficiently because shape is not always totally respected. D. Luebke, in 1997, has proposed a taxonomy of mesh simplification<sup>8</sup>. He presented the most used algorithms. He extracted different ways to use each algorithm. But in this paper, only one solution works with volumetric mesh<sup>9</sup>. T. He et al. propose a method based on voxel simplification by using a grid for clustering voxels. A marching cube<sup>10</sup> algorithm was applied to produce a surface mesh. But this simplification algorithm did not preserve the shape of the mesh. In our work, we look for point cloud simplification. Indeed, previous methods which deal with simplification for surface point cloud like<sup>11-13</sup> are not adapted to our case. All of these methods produce LOD for surface mesh and point cloud is extracted from scanner."
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "title",
|
| 183 |
+
"bbox": [
|
| 184 |
+
0.147,
|
| 185 |
+
0.711,
|
| 186 |
+
0.453,
|
| 187 |
+
0.731
|
| 188 |
+
],
|
| 189 |
+
"angle": 0,
|
| 190 |
+
"content": "3. PROPOSED APPROACH"
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "text",
|
| 194 |
+
"bbox": [
|
| 195 |
+
0.145,
|
| 196 |
+
0.746,
|
| 197 |
+
0.845,
|
| 198 |
+
0.848
|
| 199 |
+
],
|
| 200 |
+
"angle": 0,
|
| 201 |
+
"content": "This section presents the different methods that are used to visualize a kind of data from Green Data Center (GDC). The main goal is to be able to visualize in real-time the evolution of temperature in the data center. For this, we use a special particle method. Particles are located using a segmentation algorithm based on Voronoi cell extraction and Delaunay triangulation. The latency due to the large flow of particles is avoided by using a client server paradigm. We improve our solution by using LOD methods to simplify rendering."
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"type": "page_number",
|
| 205 |
+
"bbox": [
|
| 206 |
+
0.485,
|
| 207 |
+
0.971,
|
| 208 |
+
0.515,
|
| 209 |
+
0.98
|
| 210 |
+
],
|
| 211 |
+
"angle": 0,
|
| 212 |
+
"content": "2/10"
|
| 213 |
+
}
|
| 214 |
+
],
|
| 215 |
+
[
|
| 216 |
+
{
|
| 217 |
+
"type": "title",
|
| 218 |
+
"bbox": [
|
| 219 |
+
0.143,
|
| 220 |
+
0.069,
|
| 221 |
+
0.334,
|
| 222 |
+
0.087
|
| 223 |
+
],
|
| 224 |
+
"angle": 0,
|
| 225 |
+
"content": "3.1 Particle systems"
|
| 226 |
+
},
|
| 227 |
+
{
|
| 228 |
+
"type": "text",
|
| 229 |
+
"bbox": [
|
| 230 |
+
0.14,
|
| 231 |
+
0.098,
|
| 232 |
+
0.845,
|
| 233 |
+
0.271
|
| 234 |
+
],
|
| 235 |
+
"angle": 0,
|
| 236 |
+
"content": "Rooms are the bases of our study. For modeling a room, we extract the shape of the space representation which is composed by a box with three measures: length \\((l \\in \\mathbb{R})\\), width \\((w \\in \\mathbb{R})\\), height \\((h \\in \\mathbb{R})\\). Sensors are represented by \\(S = \\{\\mathrm{S}_1, \\dots, \\mathrm{S}_M\\}\\), where \\(M\\) is the number of sensors. Sensors \\(\\mathrm{S}_i (\\mathrm{i} \\in \\{1, \\dots, M\\})\\) are placed on the space on a layer \\(\\mathbf{L} \\in \\mathbb{N}\\) and have a location represented by: \\(\\{\\mathbf{X}_i, \\mathbf{Y}_i, \\mathbf{L}_j\\}\\) with \\(\\mathbf{X}_i \\in \\mathbb{R}\\), \\(\\mathbf{Y}_i \\in \\mathbb{R}\\) and \\(j\\) is the layer used. For modeling the space inside a room, we use a particle system instead of 2D map representations which have some lacks.\\(^{14}\\) Actually 2D map does not allow having a real visualization of space. A particle visualization gives a better efficiency for modeling space. We use a large number of particles to represent the entire space. \\(\\mathbf{N} \\in \\mathbb{N}\\) represents the number of particles in the room. It can be calculated using:"
|
| 237 |
+
},
|
| 238 |
+
{
|
| 239 |
+
"type": "equation",
|
| 240 |
+
"bbox": [
|
| 241 |
+
0.181,
|
| 242 |
+
0.279,
|
| 243 |
+
0.525,
|
| 244 |
+
0.318
|
| 245 |
+
],
|
| 246 |
+
"angle": 0,
|
| 247 |
+
"content": "\\[\nN = \\frac {\\left(\\left(l + 1\\right) \\times (h + 1) \\times (w + 1)\\right)}{\\delta^ {3}} \\tag {1}\n\\]"
|
| 248 |
+
},
|
| 249 |
+
{
|
| 250 |
+
"type": "text",
|
| 251 |
+
"bbox": [
|
| 252 |
+
0.139,
|
| 253 |
+
0.33,
|
| 254 |
+
0.842,
|
| 255 |
+
0.396
|
| 256 |
+
],
|
| 257 |
+
"angle": 0,
|
| 258 |
+
"content": "where \\(\\delta \\in \\mathbb{R}\\) is the space between particles. The particle grid is regular. In this model, three layers of temperature sensors compose rooms. They are defined according to their real locations in the data center. Figure ?? presents the different layers of sensors in the data center."
|
| 259 |
+
},
|
| 260 |
+
{
|
| 261 |
+
"type": "text",
|
| 262 |
+
"bbox": [
|
| 263 |
+
0.139,
|
| 264 |
+
0.399,
|
| 265 |
+
0.837,
|
| 266 |
+
0.434
|
| 267 |
+
],
|
| 268 |
+
"angle": 0,
|
| 269 |
+
"content": "Particles carry information, and flow motion can be simulated if needed by changing the value of particles and the computational cost is inferior."
|
| 270 |
+
},
|
| 271 |
+
{
|
| 272 |
+
"type": "title",
|
| 273 |
+
"bbox": [
|
| 274 |
+
0.142,
|
| 275 |
+
0.449,
|
| 276 |
+
0.419,
|
| 277 |
+
0.468
|
| 278 |
+
],
|
| 279 |
+
"angle": 0,
|
| 280 |
+
"content": "3.2 Segmentation algorithms"
|
| 281 |
+
},
|
| 282 |
+
{
|
| 283 |
+
"type": "text",
|
| 284 |
+
"bbox": [
|
| 285 |
+
0.139,
|
| 286 |
+
0.478,
|
| 287 |
+
0.836,
|
| 288 |
+
0.528
|
| 289 |
+
],
|
| 290 |
+
"angle": 0,
|
| 291 |
+
"content": "In our solution, each sensors has an influence on surrounding particules. To calculate the set of particles in the sensor range, we use two methods: Voronoi cells extraction and Delaunay triangulation."
|
| 292 |
+
},
|
| 293 |
+
{
|
| 294 |
+
"type": "text",
|
| 295 |
+
"bbox": [
|
| 296 |
+
0.139,
|
| 297 |
+
0.529,
|
| 298 |
+
0.856,
|
| 299 |
+
0.769
|
| 300 |
+
],
|
| 301 |
+
"angle": 0,
|
| 302 |
+
"content": "Voronoi cells is a method to extract a partition of space \\(^{15}\\). This method is available for \\(\\phi\\) dimensions where \\(\\phi \\in [1, +\\infty]\\), but most of implementations are done in 2D. Tools for extracting 3D Voronoi diagrams exist: Voro++ and QHull but particles are discrete and these solutions are not suitable because they extract Voronoi diagram in a continuous way. Then we designed our own method based on sphere expansion. We search nearest sensors for each particle. This part allows to weight particles outside the sensors mesh. A second method to weight the interior of the sensors mesh is used. We extract the mesh tetrahedron of sensors using the Delaunay triangulation implemented in QHull. This method was used to analyze the location of particle. We compute the exact location using ray tracing on the soup of tetrahedron. First, we search the nearest particles inside the hull of each tetrahedron. We extract the normal of each face of tetrahedron and we apply these normals on each particle. If the ray cuts three faces or more, the particle is inside the tetrahedron. This method is cost expensive and done in preprocessing. Moreover, particles are static and position didn't need to be update."
|
| 303 |
+
},
|
| 304 |
+
{
|
| 305 |
+
"type": "title",
|
| 306 |
+
"bbox": [
|
| 307 |
+
0.142,
|
| 308 |
+
0.783,
|
| 309 |
+
0.4,
|
| 310 |
+
0.803
|
| 311 |
+
],
|
| 312 |
+
"angle": 0,
|
| 313 |
+
"content": "3.3 Client server paradigm"
|
| 314 |
+
},
|
| 315 |
+
{
|
| 316 |
+
"type": "text",
|
| 317 |
+
"bbox": [
|
| 318 |
+
0.139,
|
| 319 |
+
0.812,
|
| 320 |
+
0.851,
|
| 321 |
+
0.933
|
| 322 |
+
],
|
| 323 |
+
"angle": 0,
|
| 324 |
+
"content": "To improve computation, a client server paradigm is used. We define a low cost communication protocol to transfer data from a server to a client. Server computes the modification of particles and the client displays the results. This protocol works in five steps. These steps are: sending header, sending sensor data, sending particle data, sending footer and receiving acknowledgment/language command from client. At each step, the server waits the acknowledgment from the client. We develop two ways to send data. The first sends the entire point cloud (sensors and particles). The biggest problem of this method is the"
|
| 325 |
+
},
|
| 326 |
+
{
|
| 327 |
+
"type": "page_number",
|
| 328 |
+
"bbox": [
|
| 329 |
+
0.482,
|
| 330 |
+
0.97,
|
| 331 |
+
0.518,
|
| 332 |
+
0.981
|
| 333 |
+
],
|
| 334 |
+
"angle": 0,
|
| 335 |
+
"content": "3/10"
|
| 336 |
+
}
|
| 337 |
+
],
|
| 338 |
+
[
|
| 339 |
+
{
|
| 340 |
+
"type": "text",
|
| 341 |
+
"bbox": [
|
| 342 |
+
0.14,
|
| 343 |
+
0.066,
|
| 344 |
+
0.831,
|
| 345 |
+
0.168
|
| 346 |
+
],
|
| 347 |
+
"angle": 0,
|
| 348 |
+
"content": "transmission of data. Sensors are sent with their coordinates and their value. We encode these data in bit words. For the particles data, the same method was used. The footer was sent for closing the communication. The second method is used to reduce efficiently the communication cost. We only send modified sensors and particles. The id and the new value is sent instead of coordinates. The last step is the command sent by the client. It allows the user to interact with the server. We use it to modify the camera viewpoint."
|
| 349 |
+
},
|
| 350 |
+
{
|
| 351 |
+
"type": "title",
|
| 352 |
+
"bbox": [
|
| 353 |
+
0.143,
|
| 354 |
+
0.184,
|
| 355 |
+
0.436,
|
| 356 |
+
0.202
|
| 357 |
+
],
|
| 358 |
+
"angle": 0,
|
| 359 |
+
"content": "3.4 Level of detail for particles"
|
| 360 |
+
},
|
| 361 |
+
{
|
| 362 |
+
"type": "text",
|
| 363 |
+
"bbox": [
|
| 364 |
+
0.139,
|
| 365 |
+
0.212,
|
| 366 |
+
0.849,
|
| 367 |
+
0.364
|
| 368 |
+
],
|
| 369 |
+
"angle": 0,
|
| 370 |
+
"content": "Level of detail (LOD) is one of the most important methods in computer graphics. It allows to solve rendering problems or performance problems. This method consists by producing several resolution of a 3D object. In our works, we use some features to define the object resolution: hardware and viewpoint. Hardware and viewpoint do not need the same data structure and we need to recompute it for each modification of the viewpoint or when hardware changes. LOD was defined by two problems statement. The first one uses a sample of original points, the second one uses a new point data set. In this part, we define six methods to produce LOD. The four first methods are for the client, the other are for the server."
|
| 371 |
+
},
|
| 372 |
+
{
|
| 373 |
+
"type": "title",
|
| 374 |
+
"bbox": [
|
| 375 |
+
0.141,
|
| 376 |
+
0.379,
|
| 377 |
+
0.308,
|
| 378 |
+
0.393
|
| 379 |
+
],
|
| 380 |
+
"angle": 0,
|
| 381 |
+
"content": "Problems statement:"
|
| 382 |
+
},
|
| 383 |
+
{
|
| 384 |
+
"type": "text",
|
| 385 |
+
"bbox": [
|
| 386 |
+
0.139,
|
| 387 |
+
0.394,
|
| 388 |
+
0.856,
|
| 389 |
+
0.482
|
| 390 |
+
],
|
| 391 |
+
"angle": 0,
|
| 392 |
+
"content": "For this two approaches, we have a set \\(\\omega\\) of Vertices \\(V\\), \\(V = \\{V_1, \\ldots, V_\\omega\\}\\). Each vertex is defined in \\(\\mathbb{R}^3\\). Simplify a mesh using a sample vertex means \\(\\omega > \\omega 2\\), where \\(\\omega 2\\) is the size of the second data set. For approach 1, we obtain a new object \\(\\mathrm{V}2 = \\{\\mathrm{V}2_1, \\ldots, \\mathrm{V}2_\\omega\\}\\) with fewer points than V but V 2 is a subset of V. For approach 2, we obtain a new object \\(\\mathrm{V}3 = \\{\\mathrm{V}3_1, \\ldots, \\mathrm{V}3_\\omega\\}\\) with fewer points than V but each point in V 3 is a new vertex."
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"type": "text",
|
| 396 |
+
"bbox": [
|
| 397 |
+
0.139,
|
| 398 |
+
0.494,
|
| 399 |
+
0.849,
|
| 400 |
+
0.648
|
| 401 |
+
],
|
| 402 |
+
"angle": 0,
|
| 403 |
+
"content": "In Section 2 we have presented methods to produce simplification. A few were designed for volumetric simplification. In this section, we propose several methods to produce different volumetric simplifications on our client. We develop four approaches to simplify 3D objects: clustering, neighbor simplification and two approaches based on server. Clustering method was based on He et al. \\(^{9}\\) works, it consists of clustering particles using a 3D grid. Cells sizes of grid are set depending to the viewpoint of the camera. Clusters were being weight with the average of the different values of particles. The position is the barycenter of these particles. Figures 1(a)-1(e) give some examples of simplification using clustering solution. Figure 1(a) present the original point of cloud mesh. Figure"
|
| 404 |
+
},
|
| 405 |
+
{
|
| 406 |
+
"type": "text",
|
| 407 |
+
"bbox": [
|
| 408 |
+
0.14,
|
| 409 |
+
0.659,
|
| 410 |
+
0.851,
|
| 411 |
+
0.693
|
| 412 |
+
],
|
| 413 |
+
"angle": 0,
|
| 414 |
+
"content": "1(b) and 1(d) give two different methods for clustering. And finally, Figure 1(c) and 1(e) give the results of clustering methods."
|
| 415 |
+
},
|
| 416 |
+
{
|
| 417 |
+
"type": "image",
|
| 418 |
+
"bbox": [
|
| 419 |
+
0.149,
|
| 420 |
+
0.697,
|
| 421 |
+
0.851,
|
| 422 |
+
0.822
|
| 423 |
+
],
|
| 424 |
+
"angle": 0,
|
| 425 |
+
"content": null
|
| 426 |
+
},
|
| 427 |
+
{
|
| 428 |
+
"type": "image_caption",
|
| 429 |
+
"bbox": [
|
| 430 |
+
0.14,
|
| 431 |
+
0.843,
|
| 432 |
+
0.593,
|
| 433 |
+
0.859
|
| 434 |
+
],
|
| 435 |
+
"angle": 0,
|
| 436 |
+
"content": "Figure 1. Clustering method for simplification point cloud."
|
| 437 |
+
},
|
| 438 |
+
{
|
| 439 |
+
"type": "image_footnote",
|
| 440 |
+
"bbox": [
|
| 441 |
+
0.139,
|
| 442 |
+
0.86,
|
| 443 |
+
0.854,
|
| 444 |
+
0.929
|
| 445 |
+
],
|
| 446 |
+
"angle": 0,
|
| 447 |
+
"content": "The second solution used is based on neighborhood extraction. Before runtime, we extract all neighbors of a particle. We measure the distance between each particle. Some optimization can help to decrease complexity: we can estimate easily in our structure which particle is closer to another one (using the fact that particle grid is regular). After this,"
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"type": "page_number",
|
| 451 |
+
"bbox": [
|
| 452 |
+
0.482,
|
| 453 |
+
0.97,
|
| 454 |
+
0.518,
|
| 455 |
+
0.98
|
| 456 |
+
],
|
| 457 |
+
"angle": 0,
|
| 458 |
+
"content": "4/10"
|
| 459 |
+
}
|
| 460 |
+
],
|
| 461 |
+
[
|
| 462 |
+
{
|
| 463 |
+
"type": "text",
|
| 464 |
+
"bbox": [
|
| 465 |
+
0.141,
|
| 466 |
+
0.066,
|
| 467 |
+
0.845,
|
| 468 |
+
0.168
|
| 469 |
+
],
|
| 470 |
+
"angle": 0,
|
| 471 |
+
"content": "we extract the main value of particles. We explore each neighbor of particles and we keep the most important. In some cases, the most important can be the high values, in other the low values and in other both of them. This solution is able to produce a low resolution model with the most important information structure. Several low resolution models are created by exploring deeper in neighborhood. Figures 2(a)-2(c) illustrate a neighbor, and two simplifications of this mesh."
|
| 472 |
+
},
|
| 473 |
+
{
|
| 474 |
+
"type": "image",
|
| 475 |
+
"bbox": [
|
| 476 |
+
0.149,
|
| 477 |
+
0.174,
|
| 478 |
+
0.38,
|
| 479 |
+
0.34
|
| 480 |
+
],
|
| 481 |
+
"angle": 0,
|
| 482 |
+
"content": null
|
| 483 |
+
},
|
| 484 |
+
{
|
| 485 |
+
"type": "image_caption",
|
| 486 |
+
"bbox": [
|
| 487 |
+
0.149,
|
| 488 |
+
0.341,
|
| 489 |
+
0.296,
|
| 490 |
+
0.372
|
| 491 |
+
],
|
| 492 |
+
"angle": 0,
|
| 493 |
+
"content": "(a) Neighborhood cloud."
|
| 494 |
+
},
|
| 495 |
+
{
|
| 496 |
+
"type": "image",
|
| 497 |
+
"bbox": [
|
| 498 |
+
0.384,
|
| 499 |
+
0.174,
|
| 500 |
+
0.613,
|
| 501 |
+
0.338
|
| 502 |
+
],
|
| 503 |
+
"angle": 0,
|
| 504 |
+
"content": null
|
| 505 |
+
},
|
| 506 |
+
{
|
| 507 |
+
"type": "image_caption",
|
| 508 |
+
"bbox": [
|
| 509 |
+
0.384,
|
| 510 |
+
0.341,
|
| 511 |
+
0.532,
|
| 512 |
+
0.373
|
| 513 |
+
],
|
| 514 |
+
"angle": 0,
|
| 515 |
+
"content": "(b) Simplification neighborhood of 1."
|
| 516 |
+
},
|
| 517 |
+
{
|
| 518 |
+
"type": "image",
|
| 519 |
+
"bbox": [
|
| 520 |
+
0.619,
|
| 521 |
+
0.174,
|
| 522 |
+
0.848,
|
| 523 |
+
0.339
|
| 524 |
+
],
|
| 525 |
+
"angle": 0,
|
| 526 |
+
"content": null
|
| 527 |
+
},
|
| 528 |
+
{
|
| 529 |
+
"type": "image_caption",
|
| 530 |
+
"bbox": [
|
| 531 |
+
0.619,
|
| 532 |
+
0.341,
|
| 533 |
+
0.766,
|
| 534 |
+
0.373
|
| 535 |
+
],
|
| 536 |
+
"angle": 0,
|
| 537 |
+
"content": "(c) Simplification neighborhood of 2."
|
| 538 |
+
},
|
| 539 |
+
{
|
| 540 |
+
"type": "image_caption",
|
| 541 |
+
"bbox": [
|
| 542 |
+
0.141,
|
| 543 |
+
0.393,
|
| 544 |
+
0.492,
|
| 545 |
+
0.409
|
| 546 |
+
],
|
| 547 |
+
"angle": 0,
|
| 548 |
+
"content": "Figure 2. Neighbor method for simplification."
|
| 549 |
+
},
|
| 550 |
+
{
|
| 551 |
+
"type": "text",
|
| 552 |
+
"bbox": [
|
| 553 |
+
0.14,
|
| 554 |
+
0.41,
|
| 555 |
+
0.854,
|
| 556 |
+
0.493
|
| 557 |
+
],
|
| 558 |
+
"angle": 0,
|
| 559 |
+
"content": "Other methods were based on server instead of client. Client sent via TCP connection his viewpoint. The server recomputes the particles structure and recreates the entire structure. With this solution, it is possible to produce a point cloud resolution depending on hardware. Figure 3(a) presents particles rendering with a distance of 2 from the camera. Figure 3(b) is the decimation produced with a distance of 3 and Figure 3(c) is a distance of 1."
|
| 560 |
+
},
|
| 561 |
+
{
|
| 562 |
+
"type": "text",
|
| 563 |
+
"bbox": [
|
| 564 |
+
0.14,
|
| 565 |
+
0.507,
|
| 566 |
+
0.857,
|
| 567 |
+
0.643
|
| 568 |
+
],
|
| 569 |
+
"angle": 0,
|
| 570 |
+
"content": "Another method was based on Voronoi diffusion of temperature. The bandwidth for transmitting data is limited. We developed Voronoi temperature diffusion to solve this communication. In this approach, we update data using sphere expansion. Each time, we update particles depending on their distance from sensors. The more particles are distant from sensors the later they will be refreshed. This method sends only modified particles. The bandwidth is saved and the visualization gives a flow effect. Figure 4(a) represents values at time 0. At time 1, values of sensors change, 4(b). After time 2, we update a first range of particles 4(c) and finally the second range 4(d)."
|
| 571 |
+
},
|
| 572 |
+
{
|
| 573 |
+
"type": "image",
|
| 574 |
+
"bbox": [
|
| 575 |
+
0.157,
|
| 576 |
+
0.651,
|
| 577 |
+
0.382,
|
| 578 |
+
0.816
|
| 579 |
+
],
|
| 580 |
+
"angle": 0,
|
| 581 |
+
"content": null
|
| 582 |
+
},
|
| 583 |
+
{
|
| 584 |
+
"type": "image_caption",
|
| 585 |
+
"bbox": [
|
| 586 |
+
0.156,
|
| 587 |
+
0.817,
|
| 588 |
+
0.277,
|
| 589 |
+
0.849
|
| 590 |
+
],
|
| 591 |
+
"angle": 0,
|
| 592 |
+
"content": "(a) Particles server \\((\\mathrm{D} = 2)\\)"
|
| 593 |
+
},
|
| 594 |
+
{
|
| 595 |
+
"type": "image",
|
| 596 |
+
"bbox": [
|
| 597 |
+
0.389,
|
| 598 |
+
0.651,
|
| 599 |
+
0.614,
|
| 600 |
+
0.816
|
| 601 |
+
],
|
| 602 |
+
"angle": 0,
|
| 603 |
+
"content": null
|
| 604 |
+
},
|
| 605 |
+
{
|
| 606 |
+
"type": "image_caption",
|
| 607 |
+
"bbox": [
|
| 608 |
+
0.389,
|
| 609 |
+
0.818,
|
| 610 |
+
0.577,
|
| 611 |
+
0.849
|
| 612 |
+
],
|
| 613 |
+
"angle": 0,
|
| 614 |
+
"content": "(b) Particles produce server \\((\\mathrm{D} = 3)\\)"
|
| 615 |
+
},
|
| 616 |
+
{
|
| 617 |
+
"type": "image",
|
| 618 |
+
"bbox": [
|
| 619 |
+
0.621,
|
| 620 |
+
0.651,
|
| 621 |
+
0.846,
|
| 622 |
+
0.816
|
| 623 |
+
],
|
| 624 |
+
"angle": 0,
|
| 625 |
+
"content": null
|
| 626 |
+
},
|
| 627 |
+
{
|
| 628 |
+
"type": "image_caption",
|
| 629 |
+
"bbox": [
|
| 630 |
+
0.619,
|
| 631 |
+
0.818,
|
| 632 |
+
0.848,
|
| 633 |
+
0.849
|
| 634 |
+
],
|
| 635 |
+
"angle": 0,
|
| 636 |
+
"content": "(c) Particles produce by server \\((\\mathrm{D} = 1)\\)"
|
| 637 |
+
},
|
| 638 |
+
{
|
| 639 |
+
"type": "image_caption",
|
| 640 |
+
"bbox": [
|
| 641 |
+
0.141,
|
| 642 |
+
0.868,
|
| 643 |
+
0.595,
|
| 644 |
+
0.884
|
| 645 |
+
],
|
| 646 |
+
"angle": 0,
|
| 647 |
+
"content": "Figure 3. Particle simplification using server and distance."
|
| 648 |
+
},
|
| 649 |
+
{
|
| 650 |
+
"type": "page_number",
|
| 651 |
+
"bbox": [
|
| 652 |
+
0.483,
|
| 653 |
+
0.97,
|
| 654 |
+
0.516,
|
| 655 |
+
0.98
|
| 656 |
+
],
|
| 657 |
+
"angle": 0,
|
| 658 |
+
"content": "5/10"
|
| 659 |
+
}
|
| 660 |
+
],
|
| 661 |
+
[
|
| 662 |
+
{
|
| 663 |
+
"type": "image",
|
| 664 |
+
"bbox": [
|
| 665 |
+
0.151,
|
| 666 |
+
0.068,
|
| 667 |
+
0.323,
|
| 668 |
+
0.194
|
| 669 |
+
],
|
| 670 |
+
"angle": 0,
|
| 671 |
+
"content": null
|
| 672 |
+
},
|
| 673 |
+
{
|
| 674 |
+
"type": "image_caption",
|
| 675 |
+
"bbox": [
|
| 676 |
+
0.15,
|
| 677 |
+
0.196,
|
| 678 |
+
0.325,
|
| 679 |
+
0.22
|
| 680 |
+
],
|
| 681 |
+
"angle": 0,
|
| 682 |
+
"content": "(a) Particles and sensors (T = 0)."
|
| 683 |
+
},
|
| 684 |
+
{
|
| 685 |
+
"type": "image",
|
| 686 |
+
"bbox": [
|
| 687 |
+
0.329,
|
| 688 |
+
0.068,
|
| 689 |
+
0.497,
|
| 690 |
+
0.195
|
| 691 |
+
],
|
| 692 |
+
"angle": 0,
|
| 693 |
+
"content": null
|
| 694 |
+
},
|
| 695 |
+
{
|
| 696 |
+
"type": "image_caption",
|
| 697 |
+
"bbox": [
|
| 698 |
+
0.332,
|
| 699 |
+
0.196,
|
| 700 |
+
0.493,
|
| 701 |
+
0.207
|
| 702 |
+
],
|
| 703 |
+
"angle": 0,
|
| 704 |
+
"content": "(b) Sensors update \\((\\mathrm{T} = 1)\\)."
|
| 705 |
+
},
|
| 706 |
+
{
|
| 707 |
+
"type": "image",
|
| 708 |
+
"bbox": [
|
| 709 |
+
0.504,
|
| 710 |
+
0.069,
|
| 711 |
+
0.671,
|
| 712 |
+
0.195
|
| 713 |
+
],
|
| 714 |
+
"angle": 0,
|
| 715 |
+
"content": null
|
| 716 |
+
},
|
| 717 |
+
{
|
| 718 |
+
"type": "image_caption",
|
| 719 |
+
"bbox": [
|
| 720 |
+
0.517,
|
| 721 |
+
0.196,
|
| 722 |
+
0.658,
|
| 723 |
+
0.208
|
| 724 |
+
],
|
| 725 |
+
"angle": 0,
|
| 726 |
+
"content": "(c) First range \\((\\mathrm{T} = 2)\\)."
|
| 727 |
+
},
|
| 728 |
+
{
|
| 729 |
+
"type": "image",
|
| 730 |
+
"bbox": [
|
| 731 |
+
0.675,
|
| 732 |
+
0.069,
|
| 733 |
+
0.849,
|
| 734 |
+
0.195
|
| 735 |
+
],
|
| 736 |
+
"angle": 0,
|
| 737 |
+
"content": null
|
| 738 |
+
},
|
| 739 |
+
{
|
| 740 |
+
"type": "image_caption",
|
| 741 |
+
"bbox": [
|
| 742 |
+
0.685,
|
| 743 |
+
0.196,
|
| 744 |
+
0.84,
|
| 745 |
+
0.208
|
| 746 |
+
],
|
| 747 |
+
"angle": 0,
|
| 748 |
+
"content": "(d) Second range \\((\\mathrm{T} = 3)\\)."
|
| 749 |
+
},
|
| 750 |
+
{
|
| 751 |
+
"type": "image_caption",
|
| 752 |
+
"bbox": [
|
| 753 |
+
0.14,
|
| 754 |
+
0.238,
|
| 755 |
+
0.496,
|
| 756 |
+
0.255
|
| 757 |
+
],
|
| 758 |
+
"angle": 0,
|
| 759 |
+
"content": "Figure 4. Simplification using bandwidth size."
|
| 760 |
+
},
|
| 761 |
+
{
|
| 762 |
+
"type": "title",
|
| 763 |
+
"bbox": [
|
| 764 |
+
0.142,
|
| 765 |
+
0.288,
|
| 766 |
+
0.491,
|
| 767 |
+
0.308
|
| 768 |
+
],
|
| 769 |
+
"angle": 0,
|
| 770 |
+
"content": "4. EXPERIMENTAL RESULTS"
|
| 771 |
+
},
|
| 772 |
+
{
|
| 773 |
+
"type": "text",
|
| 774 |
+
"bbox": [
|
| 775 |
+
0.139,
|
| 776 |
+
0.322,
|
| 777 |
+
0.807,
|
| 778 |
+
0.372
|
| 779 |
+
],
|
| 780 |
+
"angle": 0,
|
| 781 |
+
"content": "The data are extracted from two rooms of the IBM data center. Firstly, we present our method for rendering the room, and later we present our results using Level Of Detail methods."
|
| 782 |
+
},
|
| 783 |
+
{
|
| 784 |
+
"type": "title",
|
| 785 |
+
"bbox": [
|
| 786 |
+
0.142,
|
| 787 |
+
0.39,
|
| 788 |
+
0.348,
|
| 789 |
+
0.407
|
| 790 |
+
],
|
| 791 |
+
"angle": 0,
|
| 792 |
+
"content": "4.1 Data visualization"
|
| 793 |
+
},
|
| 794 |
+
{
|
| 795 |
+
"type": "text",
|
| 796 |
+
"bbox": [
|
| 797 |
+
0.139,
|
| 798 |
+
0.419,
|
| 799 |
+
0.849,
|
| 800 |
+
0.572
|
| 801 |
+
],
|
| 802 |
+
"angle": 0,
|
| 803 |
+
"content": "We want to visualize and manage the consumption of a data center. For the visualization, we want to use an IFC viewer. But the IFC model for GDC is not available yet. Data center extraction of the room space is for the moment done by hand. The room is empty and was represented by a simple shape: a box with 4 meters length, 3 meters width and 2.5 meters height. We use point cloud visualization based on particle paradigm. We use the two rooms of the data center and we put the same number of particles (30000) and 35 sensors distributed on three layers at 1 meter, 2 meters and on the ground. We define high and low temperature regarding the real sensors value. Figure 5(a) presents temperature color scale, Figure 5(b) and Figure 5(c) present data center sensors."
|
| 804 |
+
},
|
| 805 |
+
{
|
| 806 |
+
"type": "text",
|
| 807 |
+
"bbox": [
|
| 808 |
+
0.139,
|
| 809 |
+
0.585,
|
| 810 |
+
0.854,
|
| 811 |
+
0.756
|
| 812 |
+
],
|
| 813 |
+
"angle": 0,
|
| 814 |
+
"content": "The next step is to interpolate data from sensors. For this, we extract the sensor mesh. We use QHULL to produce a soup of tetrahedrons. Particles need to be located. We can determine which tetrahedron is the nearest, we extract the box hull of tetrahedron and we apply for each particle the norms of each tetrahedron face. If these rays cut three or more faces, then particle is inside the tetrahedron. With this method, we can determine exactly the location of each particle relative to the tetrahedrons, a weight is given to them easily. It was used to apply a coefficient to the value of each vertex of tetrahedron. For the outside particles, another solution was used: Voronoi cells. This method is based on a discrete extraction of Voronoi cells. We use our own method because other methods like Voro++ or QHull extract Voronoi diagram in a continuous way."
|
| 815 |
+
},
|
| 816 |
+
{
|
| 817 |
+
"type": "image",
|
| 818 |
+
"bbox": [
|
| 819 |
+
0.153,
|
| 820 |
+
0.879,
|
| 821 |
+
0.368,
|
| 822 |
+
0.923
|
| 823 |
+
],
|
| 824 |
+
"angle": 0,
|
| 825 |
+
"content": null
|
| 826 |
+
},
|
| 827 |
+
{
|
| 828 |
+
"type": "image",
|
| 829 |
+
"bbox": [
|
| 830 |
+
0.399,
|
| 831 |
+
0.765,
|
| 832 |
+
0.607,
|
| 833 |
+
0.9
|
| 834 |
+
],
|
| 835 |
+
"angle": 0,
|
| 836 |
+
"content": null
|
| 837 |
+
},
|
| 838 |
+
{
|
| 839 |
+
"type": "image_caption",
|
| 840 |
+
"bbox": [
|
| 841 |
+
0.438,
|
| 842 |
+
0.907,
|
| 843 |
+
0.547,
|
| 844 |
+
0.922
|
| 845 |
+
],
|
| 846 |
+
"angle": 0,
|
| 847 |
+
"content": "(b) Room one."
|
| 848 |
+
},
|
| 849 |
+
{
|
| 850 |
+
"type": "image",
|
| 851 |
+
"bbox": [
|
| 852 |
+
0.645,
|
| 853 |
+
0.766,
|
| 854 |
+
0.851,
|
| 855 |
+
0.9
|
| 856 |
+
],
|
| 857 |
+
"angle": 0,
|
| 858 |
+
"content": null
|
| 859 |
+
},
|
| 860 |
+
{
|
| 861 |
+
"type": "image_caption",
|
| 862 |
+
"bbox": [
|
| 863 |
+
0.692,
|
| 864 |
+
0.907,
|
| 865 |
+
0.8,
|
| 866 |
+
0.922
|
| 867 |
+
],
|
| 868 |
+
"angle": 0,
|
| 869 |
+
"content": "(c) Room two."
|
| 870 |
+
},
|
| 871 |
+
{
|
| 872 |
+
"type": "page_number",
|
| 873 |
+
"bbox": [
|
| 874 |
+
0.483,
|
| 875 |
+
0.97,
|
| 876 |
+
0.516,
|
| 877 |
+
0.98
|
| 878 |
+
],
|
| 879 |
+
"angle": 0,
|
| 880 |
+
"content": "6/10"
|
| 881 |
+
}
|
| 882 |
+
],
|
| 883 |
+
[
|
| 884 |
+
{
|
| 885 |
+
"type": "image_caption",
|
| 886 |
+
"bbox": [
|
| 887 |
+
0.141,
|
| 888 |
+
0.066,
|
| 889 |
+
0.445,
|
| 890 |
+
0.083
|
| 891 |
+
],
|
| 892 |
+
"angle": 0,
|
| 893 |
+
"content": "Figure 5. Data use to model the system."
|
| 894 |
+
},
|
| 895 |
+
{
|
| 896 |
+
"type": "title",
|
| 897 |
+
"bbox": [
|
| 898 |
+
0.143,
|
| 899 |
+
0.099,
|
| 900 |
+
0.326,
|
| 901 |
+
0.117
|
| 902 |
+
],
|
| 903 |
+
"angle": 0,
|
| 904 |
+
"content": "4.2 Level of details"
|
| 905 |
+
},
|
| 906 |
+
{
|
| 907 |
+
"type": "text",
|
| 908 |
+
"bbox": [
|
| 909 |
+
0.14,
|
| 910 |
+
0.127,
|
| 911 |
+
0.859,
|
| 912 |
+
0.368
|
| 913 |
+
],
|
| 914 |
+
"angle": 0,
|
| 915 |
+
"content": "In the earlier days of this project, first solution proposed gives a low frame rates, about 15 FPS (Frame Per Second): visualization was not in real-time (real-time is about 24 FPS). For solving this problem, we define a client server paradigm. This solution allows to produce a real-time rendering on the client. Figure ?? gives an example of LOD for particles. We use Openscenegraph \\(^{20}\\) as a 3D engine. It owns several features useful in LOD. A special object is defined to manage multi-resolution model. It calculates the distance of the object from the camera. For our experimentation we use five resolutions of mesh. The first mesh was the original mesh, it is set at 0 to 500. The next mesh was set at 500 to 1000, the next at 1000 to 1500 and the other at 1500 to 2000. These three meshes were constructed by specific LOD methods: clustering and significant vertices. Clustering defines a 3D grid inside the room. The size of each cell depends on the viewpoint location. The size of the cluster depends on the visibility of the clustered particles. First results are given Figure 6(a) and 6(b). Value of cluster is an average of clustered value. The number of points of the final mesh depends on the grid size. Table 1 shows the results at several distances."
|
| 916 |
+
},
|
| 917 |
+
{
|
| 918 |
+
"type": "table",
|
| 919 |
+
"bbox": [
|
| 920 |
+
0.145,
|
| 921 |
+
0.376,
|
| 922 |
+
0.855,
|
| 923 |
+
0.451
|
| 924 |
+
],
|
| 925 |
+
"angle": 0,
|
| 926 |
+
"content": "<table><tr><td></td><td>D = 0 to 500</td><td>D = 500 to 1000</td><td>D = 1000 to 1500</td><td>D = 1500 to 2000</td></tr><tr><td>C = X</td><td>30000</td><td>3900</td><td>240</td><td>36</td></tr></table>"
|
| 927 |
+
},
|
| 928 |
+
{
|
| 929 |
+
"type": "image_caption",
|
| 930 |
+
"bbox": [
|
| 931 |
+
0.141,
|
| 932 |
+
0.484,
|
| 933 |
+
0.482,
|
| 934 |
+
0.501
|
| 935 |
+
],
|
| 936 |
+
"angle": 0,
|
| 937 |
+
"content": "Table 1. Results of clustering simplification."
|
| 938 |
+
},
|
| 939 |
+
{
|
| 940 |
+
"type": "image",
|
| 941 |
+
"bbox": [
|
| 942 |
+
0.159,
|
| 943 |
+
0.51,
|
| 944 |
+
0.501,
|
| 945 |
+
0.706
|
| 946 |
+
],
|
| 947 |
+
"angle": 0,
|
| 948 |
+
"content": null
|
| 949 |
+
},
|
| 950 |
+
{
|
| 951 |
+
"type": "image_caption",
|
| 952 |
+
"bbox": [
|
| 953 |
+
0.236,
|
| 954 |
+
0.706,
|
| 955 |
+
0.425,
|
| 956 |
+
0.722
|
| 957 |
+
],
|
| 958 |
+
"angle": 0,
|
| 959 |
+
"content": "(a) \\(\\mathrm{D} = 500\\) to 1000."
|
| 960 |
+
},
|
| 961 |
+
{
|
| 962 |
+
"type": "image",
|
| 963 |
+
"bbox": [
|
| 964 |
+
0.505,
|
| 965 |
+
0.51,
|
| 966 |
+
0.848,
|
| 967 |
+
0.706
|
| 968 |
+
],
|
| 969 |
+
"angle": 0,
|
| 970 |
+
"content": null
|
| 971 |
+
},
|
| 972 |
+
{
|
| 973 |
+
"type": "image_caption",
|
| 974 |
+
"bbox": [
|
| 975 |
+
0.576,
|
| 976 |
+
0.706,
|
| 977 |
+
0.776,
|
| 978 |
+
0.721
|
| 979 |
+
],
|
| 980 |
+
"angle": 0,
|
| 981 |
+
"content": "(b) \\(D = 1000\\) to 1500."
|
| 982 |
+
},
|
| 983 |
+
{
|
| 984 |
+
"type": "image_caption",
|
| 985 |
+
"bbox": [
|
| 986 |
+
0.14,
|
| 987 |
+
0.74,
|
| 988 |
+
0.492,
|
| 989 |
+
0.757
|
| 990 |
+
],
|
| 991 |
+
"angle": 0,
|
| 992 |
+
"content": "Figure 6. Clustering visualization algorithms."
|
| 993 |
+
},
|
| 994 |
+
{
|
| 995 |
+
"type": "text",
|
| 996 |
+
"bbox": [
|
| 997 |
+
0.139,
|
| 998 |
+
0.757,
|
| 999 |
+
0.844,
|
| 1000 |
+
0.86
|
| 1001 |
+
],
|
| 1002 |
+
"angle": 0,
|
| 1003 |
+
"content": "Significant points method extracts the neighbors for each particle. We extract the highest and lowest temperatures, by exploring the neighborhood of a particle, in order to have significant vertices of the model. For the first step of simplified model we explore neighbor. For the second model, we explore neighbor and neighbor of neighbor, etc. This solution simplifies drastically the model. First results are given Figure ??-??. Table 2 shows the number of vertices at several distance."
|
| 1004 |
+
},
|
| 1005 |
+
{
|
| 1006 |
+
"type": "page_number",
|
| 1007 |
+
"bbox": [
|
| 1008 |
+
0.483,
|
| 1009 |
+
0.97,
|
| 1010 |
+
0.517,
|
| 1011 |
+
0.98
|
| 1012 |
+
],
|
| 1013 |
+
"angle": 0,
|
| 1014 |
+
"content": "7/10"
|
| 1015 |
+
}
|
| 1016 |
+
],
|
| 1017 |
+
[
|
| 1018 |
+
{
|
| 1019 |
+
"type": "table",
|
| 1020 |
+
"bbox": [
|
| 1021 |
+
0.145,
|
| 1022 |
+
0.063,
|
| 1023 |
+
0.855,
|
| 1024 |
+
0.138
|
| 1025 |
+
],
|
| 1026 |
+
"angle": 0,
|
| 1027 |
+
"content": "<table><tr><td></td><td>D = 0 to 500</td><td>D = 500 to 1000</td><td>D = 1000 to 1500</td><td>D = 1500 to 2000</td></tr><tr><td>C = X</td><td>30000</td><td>22950</td><td>4554</td><td>3524</td></tr></table>"
|
| 1028 |
+
},
|
| 1029 |
+
{
|
| 1030 |
+
"type": "table_caption",
|
| 1031 |
+
"bbox": [
|
| 1032 |
+
0.14,
|
| 1033 |
+
0.171,
|
| 1034 |
+
0.474,
|
| 1035 |
+
0.187
|
| 1036 |
+
],
|
| 1037 |
+
"angle": 0,
|
| 1038 |
+
"content": "Table 2. Results of neighbor simplification."
|
| 1039 |
+
},
|
| 1040 |
+
{
|
| 1041 |
+
"type": "image",
|
| 1042 |
+
"bbox": [
|
| 1043 |
+
0.154,
|
| 1044 |
+
0.196,
|
| 1045 |
+
0.497,
|
| 1046 |
+
0.403
|
| 1047 |
+
],
|
| 1048 |
+
"angle": 0,
|
| 1049 |
+
"content": null
|
| 1050 |
+
},
|
| 1051 |
+
{
|
| 1052 |
+
"type": "image_caption",
|
| 1053 |
+
"bbox": [
|
| 1054 |
+
0.228,
|
| 1055 |
+
0.403,
|
| 1056 |
+
0.422,
|
| 1057 |
+
0.42
|
| 1058 |
+
],
|
| 1059 |
+
"angle": 0,
|
| 1060 |
+
"content": "(a) Neighborhood 1."
|
| 1061 |
+
},
|
| 1062 |
+
{
|
| 1063 |
+
"type": "image",
|
| 1064 |
+
"bbox": [
|
| 1065 |
+
0.501,
|
| 1066 |
+
0.196,
|
| 1067 |
+
0.844,
|
| 1068 |
+
0.403
|
| 1069 |
+
],
|
| 1070 |
+
"angle": 0,
|
| 1071 |
+
"content": null
|
| 1072 |
+
},
|
| 1073 |
+
{
|
| 1074 |
+
"type": "image_caption",
|
| 1075 |
+
"bbox": [
|
| 1076 |
+
0.575,
|
| 1077 |
+
0.403,
|
| 1078 |
+
0.769,
|
| 1079 |
+
0.42
|
| 1080 |
+
],
|
| 1081 |
+
"angle": 0,
|
| 1082 |
+
"content": "(b) Neighborhood 2."
|
| 1083 |
+
},
|
| 1084 |
+
{
|
| 1085 |
+
"type": "image_caption",
|
| 1086 |
+
"bbox": [
|
| 1087 |
+
0.14,
|
| 1088 |
+
0.441,
|
| 1089 |
+
0.607,
|
| 1090 |
+
0.459
|
| 1091 |
+
],
|
| 1092 |
+
"angle": 0,
|
| 1093 |
+
"content": "Figure 7. Clustering visualization algorithms using neighbor."
|
| 1094 |
+
},
|
| 1095 |
+
{
|
| 1096 |
+
"type": "text",
|
| 1097 |
+
"bbox": [
|
| 1098 |
+
0.139,
|
| 1099 |
+
0.471,
|
| 1100 |
+
0.845,
|
| 1101 |
+
0.556
|
| 1102 |
+
],
|
| 1103 |
+
"angle": 0,
|
| 1104 |
+
"content": "The first server solution receives orders from client as presented Section 3.4. We calculate the viewpoint distance and we send data according to it. A new structure is recalculated if the camera is too far from the object. After the recomputing, we send the new data. This solution allows the user to receive more or less data according to its distance to the object. Table 3 shows some different resolutions produced with this method."
|
| 1105 |
+
},
|
| 1106 |
+
{
|
| 1107 |
+
"type": "table",
|
| 1108 |
+
"bbox": [
|
| 1109 |
+
0.145,
|
| 1110 |
+
0.567,
|
| 1111 |
+
0.855,
|
| 1112 |
+
0.641
|
| 1113 |
+
],
|
| 1114 |
+
"angle": 0,
|
| 1115 |
+
"content": "<table><tr><td></td><td>D = 0 to 500</td><td>D = 500 to 1000</td><td>D = 1000 to 1500</td><td>D = 1500 to 2000</td></tr><tr><td>C = X</td><td>120000</td><td>30000</td><td>7500</td><td>1875</td></tr></table>"
|
| 1116 |
+
},
|
| 1117 |
+
{
|
| 1118 |
+
"type": "table_caption",
|
| 1119 |
+
"bbox": [
|
| 1120 |
+
0.14,
|
| 1121 |
+
0.675,
|
| 1122 |
+
0.425,
|
| 1123 |
+
0.689
|
| 1124 |
+
],
|
| 1125 |
+
"angle": 0,
|
| 1126 |
+
"content": "Table 3. Several resolution of model."
|
| 1127 |
+
},
|
| 1128 |
+
{
|
| 1129 |
+
"type": "text",
|
| 1130 |
+
"bbox": [
|
| 1131 |
+
0.139,
|
| 1132 |
+
0.692,
|
| 1133 |
+
0.854,
|
| 1134 |
+
0.776
|
| 1135 |
+
],
|
| 1136 |
+
"angle": 0,
|
| 1137 |
+
"content": "Another solution is to use bandwidth latency. We send data at several times, we do not send the entire set of data but only modified particles. We send at first time the sensors data, and subsequently we send a range of data (the nearest). After few minutes, all data are sent. This solution gives good results, and simulates a thermal diffusion in the whole structure of particles. Figure 8(a)-8(c) illustrate this method."
|
| 1138 |
+
},
|
| 1139 |
+
{
|
| 1140 |
+
"type": "image",
|
| 1141 |
+
"bbox": [
|
| 1142 |
+
0.15,
|
| 1143 |
+
0.78,
|
| 1144 |
+
0.382,
|
| 1145 |
+
0.913
|
| 1146 |
+
],
|
| 1147 |
+
"angle": 0,
|
| 1148 |
+
"content": null
|
| 1149 |
+
},
|
| 1150 |
+
{
|
| 1151 |
+
"type": "image_caption",
|
| 1152 |
+
"bbox": [
|
| 1153 |
+
0.235,
|
| 1154 |
+
0.912,
|
| 1155 |
+
0.299,
|
| 1156 |
+
0.924
|
| 1157 |
+
],
|
| 1158 |
+
"angle": 0,
|
| 1159 |
+
"content": "(a) \\(\\mathrm{T} = 0\\)"
|
| 1160 |
+
},
|
| 1161 |
+
{
|
| 1162 |
+
"type": "image",
|
| 1163 |
+
"bbox": [
|
| 1164 |
+
0.384,
|
| 1165 |
+
0.78,
|
| 1166 |
+
0.617,
|
| 1167 |
+
0.913
|
| 1168 |
+
],
|
| 1169 |
+
"angle": 0,
|
| 1170 |
+
"content": null
|
| 1171 |
+
},
|
| 1172 |
+
{
|
| 1173 |
+
"type": "image_caption",
|
| 1174 |
+
"bbox": [
|
| 1175 |
+
0.468,
|
| 1176 |
+
0.912,
|
| 1177 |
+
0.532,
|
| 1178 |
+
0.924
|
| 1179 |
+
],
|
| 1180 |
+
"angle": 0,
|
| 1181 |
+
"content": "(b) \\(\\mathrm{T} = 1\\)"
|
| 1182 |
+
},
|
| 1183 |
+
{
|
| 1184 |
+
"type": "image",
|
| 1185 |
+
"bbox": [
|
| 1186 |
+
0.619,
|
| 1187 |
+
0.78,
|
| 1188 |
+
0.851,
|
| 1189 |
+
0.913
|
| 1190 |
+
],
|
| 1191 |
+
"angle": 0,
|
| 1192 |
+
"content": null
|
| 1193 |
+
},
|
| 1194 |
+
{
|
| 1195 |
+
"type": "image_caption",
|
| 1196 |
+
"bbox": [
|
| 1197 |
+
0.704,
|
| 1198 |
+
0.912,
|
| 1199 |
+
0.766,
|
| 1200 |
+
0.924
|
| 1201 |
+
],
|
| 1202 |
+
"angle": 0,
|
| 1203 |
+
"content": "(c) \\(\\mathrm{T} = 4\\)"
|
| 1204 |
+
},
|
| 1205 |
+
{
|
| 1206 |
+
"type": "page_number",
|
| 1207 |
+
"bbox": [
|
| 1208 |
+
0.483,
|
| 1209 |
+
0.97,
|
| 1210 |
+
0.516,
|
| 1211 |
+
0.98
|
| 1212 |
+
],
|
| 1213 |
+
"angle": 0,
|
| 1214 |
+
"content": "8/10"
|
| 1215 |
+
}
|
| 1216 |
+
],
|
| 1217 |
+
[
|
| 1218 |
+
{
|
| 1219 |
+
"type": "header",
|
| 1220 |
+
"bbox": [
|
| 1221 |
+
0.141,
|
| 1222 |
+
0.066,
|
| 1223 |
+
0.415,
|
| 1224 |
+
0.082
|
| 1225 |
+
],
|
| 1226 |
+
"angle": 0,
|
| 1227 |
+
"content": "Figure 8. Bandwidth simplification."
|
| 1228 |
+
},
|
| 1229 |
+
{
|
| 1230 |
+
"type": "title",
|
| 1231 |
+
"bbox": [
|
| 1232 |
+
0.143,
|
| 1233 |
+
0.115,
|
| 1234 |
+
0.341,
|
| 1235 |
+
0.136
|
| 1236 |
+
],
|
| 1237 |
+
"angle": 0,
|
| 1238 |
+
"content": "5. CONCLUSION"
|
| 1239 |
+
},
|
| 1240 |
+
{
|
| 1241 |
+
"type": "text",
|
| 1242 |
+
"bbox": [
|
| 1243 |
+
0.142,
|
| 1244 |
+
0.15,
|
| 1245 |
+
0.851,
|
| 1246 |
+
0.252
|
| 1247 |
+
],
|
| 1248 |
+
"angle": 0,
|
| 1249 |
+
"content": "In this paper, we have presented a method to visualize sensors data extracted from a Green Data Center. This approach produces interpolation visualization for managing and visualizing data. This interpolation used a Delaunay triangulation and a cell extraction based on Voronoi. An unusual way of using particles helps to process data. First results present the solution proposed to visualize the inside of a GDC space. The second results proposed in this paper aim to improve the rendering."
|
| 1250 |
+
},
|
| 1251 |
+
{
|
| 1252 |
+
"type": "text",
|
| 1253 |
+
"bbox": [
|
| 1254 |
+
0.142,
|
| 1255 |
+
0.253,
|
| 1256 |
+
0.854,
|
| 1257 |
+
0.372
|
| 1258 |
+
],
|
| 1259 |
+
"angle": 0,
|
| 1260 |
+
"content": "For this, first step introduces a client/server protocol, a second step illustrates methods to simplify the model. With these different approaches we improve the rendering time, ensuring the most important data are kept. In future works, we will work on data \"dressing\". We want to find a way to improve rendering of the scene using metaballs or marching cubes algorithms. A main constraint of this work is real-time computation. Future work also concerns adding rooms to the visualization. At present, we only visualize a single room. We want to visualize building, and complex form, by using an IFC loader."
|
| 1261 |
+
},
|
| 1262 |
+
{
|
| 1263 |
+
"type": "title",
|
| 1264 |
+
"bbox": [
|
| 1265 |
+
0.143,
|
| 1266 |
+
0.404,
|
| 1267 |
+
0.421,
|
| 1268 |
+
0.424
|
| 1269 |
+
],
|
| 1270 |
+
"angle": 0,
|
| 1271 |
+
"content": "ACKNOWLEDGMENTS"
|
| 1272 |
+
},
|
| 1273 |
+
{
|
| 1274 |
+
"type": "text",
|
| 1275 |
+
"bbox": [
|
| 1276 |
+
0.14,
|
| 1277 |
+
0.439,
|
| 1278 |
+
0.838,
|
| 1279 |
+
0.507
|
| 1280 |
+
],
|
| 1281 |
+
"angle": 0,
|
| 1282 |
+
"content": "We want to thank the PSSC (Products and Solutions Support Center) team of IBM Montpellier for having provided the necessary equipment and data needed for this experimentation. And we thank the FUI (Fonds Unique Interministriel) for their financial support."
|
| 1283 |
+
},
|
| 1284 |
+
{
|
| 1285 |
+
"type": "title",
|
| 1286 |
+
"bbox": [
|
| 1287 |
+
0.143,
|
| 1288 |
+
0.54,
|
| 1289 |
+
0.311,
|
| 1290 |
+
0.559
|
| 1291 |
+
],
|
| 1292 |
+
"angle": 0,
|
| 1293 |
+
"content": "REFERENCES"
|
| 1294 |
+
},
|
| 1295 |
+
{
|
| 1296 |
+
"type": "ref_text",
|
| 1297 |
+
"bbox": [
|
| 1298 |
+
0.142,
|
| 1299 |
+
0.574,
|
| 1300 |
+
0.744,
|
| 1301 |
+
0.606
|
| 1302 |
+
],
|
| 1303 |
+
"angle": 0,
|
| 1304 |
+
"content": "[1] Clark, J. H., \"Hierarchical geometric models for visible surface algorithms,\" Communications of the ACM 19(10), 547-554 (1976)."
|
| 1305 |
+
},
|
| 1306 |
+
{
|
| 1307 |
+
"type": "ref_text",
|
| 1308 |
+
"bbox": [
|
| 1309 |
+
0.143,
|
| 1310 |
+
0.609,
|
| 1311 |
+
0.81,
|
| 1312 |
+
0.641
|
| 1313 |
+
],
|
| 1314 |
+
"angle": 0,
|
| 1315 |
+
"content": "[2] Damon, M., Kameyama, M., Knox, M., Porter, D., Yuen, D., and Sevre, E., \"Interactive visualization of 3d mantle convection,\" Visual Geosciences (2008)."
|
| 1316 |
+
},
|
| 1317 |
+
{
|
| 1318 |
+
"type": "ref_text",
|
| 1319 |
+
"bbox": [
|
| 1320 |
+
0.143,
|
| 1321 |
+
0.643,
|
| 1322 |
+
0.831,
|
| 1323 |
+
0.675
|
| 1324 |
+
],
|
| 1325 |
+
"angle": 0,
|
| 1326 |
+
"content": "[3] Jordan, K. E., Yuen, D. A., Reuteler, D. M., Zhang, S., and Haimes, R., \"Parallel interactive visualization of 3d mantle convection,\" IEEE Comput. Sci. Eng. 3(4), 29-37 (1996)."
|
| 1327 |
+
},
|
| 1328 |
+
{
|
| 1329 |
+
"type": "ref_text",
|
| 1330 |
+
"bbox": [
|
| 1331 |
+
0.143,
|
| 1332 |
+
0.677,
|
| 1333 |
+
0.844,
|
| 1334 |
+
0.71
|
| 1335 |
+
],
|
| 1336 |
+
"angle": 0,
|
| 1337 |
+
"content": "[4] Reeves, W. T., \"Particle systems - a technique for modeling a class of fuzzy objects,\" ACM Transactions on Graphics 2, 359-376 (1983)."
|
| 1338 |
+
},
|
| 1339 |
+
{
|
| 1340 |
+
"type": "ref_text",
|
| 1341 |
+
"bbox": [
|
| 1342 |
+
0.144,
|
| 1343 |
+
0.712,
|
| 1344 |
+
0.571,
|
| 1345 |
+
0.727
|
| 1346 |
+
],
|
| 1347 |
+
"angle": 0,
|
| 1348 |
+
"content": "[5] Latta, L., \"Building a million particle system,\" (2004)."
|
| 1349 |
+
},
|
| 1350 |
+
{
|
| 1351 |
+
"type": "ref_text",
|
| 1352 |
+
"bbox": [
|
| 1353 |
+
0.143,
|
| 1354 |
+
0.728,
|
| 1355 |
+
0.783,
|
| 1356 |
+
0.761
|
| 1357 |
+
],
|
| 1358 |
+
"angle": 0,
|
| 1359 |
+
"content": "[6] Kapferer, W. and Riser, T., \"Visualization needs and techniques for astrophysical simulations,\" New Journal of Physics 10(12), 125008 (15pp) (2008)."
|
| 1360 |
+
},
|
| 1361 |
+
{
|
| 1362 |
+
"type": "ref_text",
|
| 1363 |
+
"bbox": [
|
| 1364 |
+
0.143,
|
| 1365 |
+
0.763,
|
| 1366 |
+
0.837,
|
| 1367 |
+
0.794
|
| 1368 |
+
],
|
| 1369 |
+
"angle": 0,
|
| 1370 |
+
"content": "[7] Schroeder, W. J., Zarge, J. A., and Lorensen, W. E., \"Decimation of triangle meshes,\" 65-70 (1992)."
|
| 1371 |
+
},
|
| 1372 |
+
{
|
| 1373 |
+
"type": "ref_text",
|
| 1374 |
+
"bbox": [
|
| 1375 |
+
0.143,
|
| 1376 |
+
0.797,
|
| 1377 |
+
0.696,
|
| 1378 |
+
0.812
|
| 1379 |
+
],
|
| 1380 |
+
"angle": 0,
|
| 1381 |
+
"content": "[8] Luebke, D., \"A survey of polygonal simplification algorithms,\" (1997)."
|
| 1382 |
+
},
|
| 1383 |
+
{
|
| 1384 |
+
"type": "ref_text",
|
| 1385 |
+
"bbox": [
|
| 1386 |
+
0.143,
|
| 1387 |
+
0.814,
|
| 1388 |
+
0.854,
|
| 1389 |
+
0.847
|
| 1390 |
+
],
|
| 1391 |
+
"angle": 0,
|
| 1392 |
+
"content": "[9] He, T., Hong, L., Kaufman, A., Varshney, A., and Wang, S., \"Voxel based object simplification,\" in [Proc. SIGGRAPH Symposium on Interactive 3D Graphics], 296-303 (1995)."
|
| 1393 |
+
},
|
| 1394 |
+
{
|
| 1395 |
+
"type": "ref_text",
|
| 1396 |
+
"bbox": [
|
| 1397 |
+
0.143,
|
| 1398 |
+
0.848,
|
| 1399 |
+
0.776,
|
| 1400 |
+
0.88
|
| 1401 |
+
],
|
| 1402 |
+
"angle": 0,
|
| 1403 |
+
"content": "[10] Lorensen, W. E. and Cline, H. E., \"Marching cubes: A high resolution 3d surface construction algorithm,\" SIGGRAPH Comput. Graph. 21(4), 163-169 (1987)."
|
| 1404 |
+
},
|
| 1405 |
+
{
|
| 1406 |
+
"type": "ref_text",
|
| 1407 |
+
"bbox": [
|
| 1408 |
+
0.143,
|
| 1409 |
+
0.882,
|
| 1410 |
+
0.791,
|
| 1411 |
+
0.914
|
| 1412 |
+
],
|
| 1413 |
+
"angle": 0,
|
| 1414 |
+
"content": "[11] Pauly, M., Gross, M., and Kobbelt, L. P., \"Efficient simplification of point-sampled surfaces,\" (2002)."
|
| 1415 |
+
},
|
| 1416 |
+
{
|
| 1417 |
+
"type": "ref_text",
|
| 1418 |
+
"bbox": [
|
| 1419 |
+
0.143,
|
| 1420 |
+
0.916,
|
| 1421 |
+
0.844,
|
| 1422 |
+
0.932
|
| 1423 |
+
],
|
| 1424 |
+
"angle": 0,
|
| 1425 |
+
"content": "[12] Moenning, C., , Moenning, C., and Dodgson, N. A., \"Intrinsic point cloud simplification,\""
|
| 1426 |
+
},
|
| 1427 |
+
{
|
| 1428 |
+
"type": "list",
|
| 1429 |
+
"bbox": [
|
| 1430 |
+
0.142,
|
| 1431 |
+
0.574,
|
| 1432 |
+
0.854,
|
| 1433 |
+
0.932
|
| 1434 |
+
],
|
| 1435 |
+
"angle": 0,
|
| 1436 |
+
"content": null
|
| 1437 |
+
},
|
| 1438 |
+
{
|
| 1439 |
+
"type": "page_number",
|
| 1440 |
+
"bbox": [
|
| 1441 |
+
0.482,
|
| 1442 |
+
0.97,
|
| 1443 |
+
0.516,
|
| 1444 |
+
0.98
|
| 1445 |
+
],
|
| 1446 |
+
"angle": 0,
|
| 1447 |
+
"content": "9/10"
|
| 1448 |
+
}
|
| 1449 |
+
],
|
| 1450 |
+
[
|
| 1451 |
+
{
|
| 1452 |
+
"type": "ref_text",
|
| 1453 |
+
"bbox": [
|
| 1454 |
+
0.143,
|
| 1455 |
+
0.066,
|
| 1456 |
+
0.195,
|
| 1457 |
+
0.08
|
| 1458 |
+
],
|
| 1459 |
+
"angle": 0,
|
| 1460 |
+
"content": "(2004)."
|
| 1461 |
+
},
|
| 1462 |
+
{
|
| 1463 |
+
"type": "ref_text",
|
| 1464 |
+
"bbox": [
|
| 1465 |
+
0.143,
|
| 1466 |
+
0.083,
|
| 1467 |
+
0.798,
|
| 1468 |
+
0.132
|
| 1469 |
+
],
|
| 1470 |
+
"angle": 0,
|
| 1471 |
+
"content": "[13] Song, H. and Feng, H.-Y., \"A progressive point cloud simplification algorithm with preserved sharp edge data,\" The International Journal of Advanced Manufacturing Technology 45, 583-592 (November 2009)."
|
| 1472 |
+
},
|
| 1473 |
+
{
|
| 1474 |
+
"type": "ref_text",
|
| 1475 |
+
"bbox": [
|
| 1476 |
+
0.143,
|
| 1477 |
+
0.134,
|
| 1478 |
+
0.841,
|
| 1479 |
+
0.166
|
| 1480 |
+
],
|
| 1481 |
+
"angle": 0,
|
| 1482 |
+
"content": "[14] Buschmann, C., Pfisterer, D., Fischer, S., Fekete, S. P., and Kröller, A., \"Spyglass: a wireless sensor network visualizer,\" SIGBED Rev. 2(1), 1-6 (2005)."
|
| 1483 |
+
},
|
| 1484 |
+
{
|
| 1485 |
+
"type": "ref_text",
|
| 1486 |
+
"bbox": [
|
| 1487 |
+
0.143,
|
| 1488 |
+
0.168,
|
| 1489 |
+
0.798,
|
| 1490 |
+
0.2
|
| 1491 |
+
],
|
| 1492 |
+
"angle": 0,
|
| 1493 |
+
"content": "[15] Avis, D. and Bhattacharya, B., \"Algorithms for computing d-dimensional voronoi diagrams and their duals,\" 1, 159-180 (1983)."
|
| 1494 |
+
},
|
| 1495 |
+
{
|
| 1496 |
+
"type": "ref_text",
|
| 1497 |
+
"bbox": [
|
| 1498 |
+
0.144,
|
| 1499 |
+
0.202,
|
| 1500 |
+
0.851,
|
| 1501 |
+
0.235
|
| 1502 |
+
],
|
| 1503 |
+
"angle": 0,
|
| 1504 |
+
"content": "[16] Rycroft, C. H., \"Voro++: a three-dimensional voronoi cell library in \\( c++ \\),\" Chaos 19 (2009). Lawrence Berkeley National Laboratory."
|
| 1505 |
+
},
|
| 1506 |
+
{
|
| 1507 |
+
"type": "ref_text",
|
| 1508 |
+
"bbox": [
|
| 1509 |
+
0.143,
|
| 1510 |
+
0.237,
|
| 1511 |
+
0.819,
|
| 1512 |
+
0.268
|
| 1513 |
+
],
|
| 1514 |
+
"angle": 0,
|
| 1515 |
+
"content": "[17] Barber, C. B., Dobkin, D. P., and Huhdanpaa, H., \"The quickhull algorithm for convex hulls,\" ACM Trans. Math. Softw. 22(4), 469-483 (1996)."
|
| 1516 |
+
},
|
| 1517 |
+
{
|
| 1518 |
+
"type": "ref_text",
|
| 1519 |
+
"bbox": [
|
| 1520 |
+
0.143,
|
| 1521 |
+
0.271,
|
| 1522 |
+
0.773,
|
| 1523 |
+
0.303
|
| 1524 |
+
],
|
| 1525 |
+
"angle": 0,
|
| 1526 |
+
"content": "[18] Snyder, J. M. and Barr, A. H., \"Ray tracing complex models containing surface tessellations,\" SIGGRAPH Comput. Graph. 21(4), 119-128 (1987)."
|
| 1527 |
+
},
|
| 1528 |
+
{
|
| 1529 |
+
"type": "ref_text",
|
| 1530 |
+
"bbox": [
|
| 1531 |
+
0.144,
|
| 1532 |
+
0.305,
|
| 1533 |
+
0.836,
|
| 1534 |
+
0.336
|
| 1535 |
+
],
|
| 1536 |
+
"angle": 0,
|
| 1537 |
+
"content": "[19] Hoppe, H., \"Progressive meshes. computer graphics,\" SIGGRAPH96 Proceedings, 99108 (1996)."
|
| 1538 |
+
},
|
| 1539 |
+
{
|
| 1540 |
+
"type": "ref_text",
|
| 1541 |
+
"bbox": [
|
| 1542 |
+
0.143,
|
| 1543 |
+
0.339,
|
| 1544 |
+
0.774,
|
| 1545 |
+
0.372
|
| 1546 |
+
],
|
| 1547 |
+
"angle": 0,
|
| 1548 |
+
"content": "[20] Burns, D. and Osfield, R., \"Open scene graph a: Introduction, b: Examples and applications,\" 265 (2004)."
|
| 1549 |
+
},
|
| 1550 |
+
{
|
| 1551 |
+
"type": "list",
|
| 1552 |
+
"bbox": [
|
| 1553 |
+
0.143,
|
| 1554 |
+
0.066,
|
| 1555 |
+
0.851,
|
| 1556 |
+
0.372
|
| 1557 |
+
],
|
| 1558 |
+
"angle": 0,
|
| 1559 |
+
"content": null
|
| 1560 |
+
},
|
| 1561 |
+
{
|
| 1562 |
+
"type": "text",
|
| 1563 |
+
"bbox": [
|
| 1564 |
+
0.167,
|
| 1565 |
+
0.394,
|
| 1566 |
+
0.39,
|
| 1567 |
+
0.409
|
| 1568 |
+
],
|
| 1569 |
+
"angle": 0,
|
| 1570 |
+
"content": "Further author information:"
|
| 1571 |
+
},
|
| 1572 |
+
{
|
| 1573 |
+
"type": "text",
|
| 1574 |
+
"bbox": [
|
| 1575 |
+
0.167,
|
| 1576 |
+
0.412,
|
| 1577 |
+
0.482,
|
| 1578 |
+
0.427
|
| 1579 |
+
],
|
| 1580 |
+
"angle": 0,
|
| 1581 |
+
"content": "Lange B.: E-mail: benoit.lange@lirmm.fr"
|
| 1582 |
+
},
|
| 1583 |
+
{
|
| 1584 |
+
"type": "text",
|
| 1585 |
+
"bbox": [
|
| 1586 |
+
0.168,
|
| 1587 |
+
0.429,
|
| 1588 |
+
0.544,
|
| 1589 |
+
0.444
|
| 1590 |
+
],
|
| 1591 |
+
"angle": 0,
|
| 1592 |
+
"content": "Rodriguez N.: E-mail: nancy.rodriguez@lirmm.fr"
|
| 1593 |
+
},
|
| 1594 |
+
{
|
| 1595 |
+
"type": "text",
|
| 1596 |
+
"bbox": [
|
| 1597 |
+
0.168,
|
| 1598 |
+
0.446,
|
| 1599 |
+
0.498,
|
| 1600 |
+
0.46
|
| 1601 |
+
],
|
| 1602 |
+
"angle": 0,
|
| 1603 |
+
"content": "Puech W.: E-mail: william.puech@lirmm.fr"
|
| 1604 |
+
},
|
| 1605 |
+
{
|
| 1606 |
+
"type": "text",
|
| 1607 |
+
"bbox": [
|
| 1608 |
+
0.168,
|
| 1609 |
+
0.463,
|
| 1610 |
+
0.468,
|
| 1611 |
+
0.478
|
| 1612 |
+
],
|
| 1613 |
+
"angle": 0,
|
| 1614 |
+
"content": "Rey H.: E-mail:REYHERVE@fr.ibm.com"
|
| 1615 |
+
},
|
| 1616 |
+
{
|
| 1617 |
+
"type": "text",
|
| 1618 |
+
"bbox": [
|
| 1619 |
+
0.168,
|
| 1620 |
+
0.48,
|
| 1621 |
+
0.529,
|
| 1622 |
+
0.495
|
| 1623 |
+
],
|
| 1624 |
+
"angle": 0,
|
| 1625 |
+
"content": "Vasques X.: E-mail: xaviervasques@fr.ibm.com"
|
| 1626 |
+
},
|
| 1627 |
+
{
|
| 1628 |
+
"type": "page_number",
|
| 1629 |
+
"bbox": [
|
| 1630 |
+
0.481,
|
| 1631 |
+
0.97,
|
| 1632 |
+
0.519,
|
| 1633 |
+
0.98
|
| 1634 |
+
],
|
| 1635 |
+
"angle": 0,
|
| 1636 |
+
"content": "10/10"
|
| 1637 |
+
}
|
| 1638 |
+
]
|
| 1639 |
+
]
|
data/2025/2503_09xxx/2503.09198/ef3c6a72-d844-464a-90c5-502ddc16df65_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1ccb0b38b36e023702ff1726eaa2bd337a2762c59724e11e0f0dfcd4402cebf8
|
| 3 |
+
size 964405
|
data/2025/2503_09xxx/2503.09198/full.md
ADDED
|
@@ -0,0 +1,222 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A 3D particle visualization system for temperature management
|
| 2 |
+
|
| 3 |
+
Lange B. $^{a}$ , Rodriguez N. $^{a}$ , Puech W. $^{a}$ , Rey H. $^{b}$ and Vasques X. $^{b}$
|
| 4 |
+
|
| 5 |
+
$^{a}$ LIRMM, 141 rue ADA, Montpellier, France;
|
| 6 |
+
|
| 7 |
+
<sup>b</sup> IBM, Rue de la vieille poste, Montpellier,
|
| 8 |
+
|
| 9 |
+
France
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
This paper deals with a 3D visualization technique proposed to analyze and manage energy efficiency from a data center. Data are extracted from sensors located in the IBM Green Data Center in Montpellier France. These sensors measure different information such as hygrometry, pressure and temperature. We want to visualize in real-time the large amount of data produced by these sensors. A visualization engine has been designed, based on a particle system and a client server paradigm. In order to solve performance problems, a Level Of Detail solution has been developed. These methods are based on the earlier work introduced by J. Clark in $1976^{1}$ . In this paper we introduce a particle method used for this work and subsequently we explain different simplification methods applied to improve our solution.
|
| 14 |
+
|
| 15 |
+
Keywords: 3D Visualization, Sensors, Particles, Client/Server, Level Of Details
|
| 16 |
+
|
| 17 |
+
# 1. INTRODUCTION
|
| 18 |
+
|
| 19 |
+
In this paper, we present a method to produce a 3D visualization for analyzing and managing temperature. Data are extracted from sensors located in the IBM Green Data Center in Montpellier, which provides many different types of information like temperature, pressure or hygrometry. In our system, sensors are placed in a virtual room and the internal space is modeled using particles. The main constraint here is to produce a real-time rendering. However, latency appears due to the number of vertices. In this paper, we use a solution called LOD (Level Of Detail) to produce multi-resolution 3D objects. This solution has been introduced in 1976 by J. Clark<sup>1</sup>. In this paper, J. Clark introduces the use of several mesh resolutions to simplify the 3D scene complexity. In our work, we use various simplification methods to provide interactive rendering and to render the most important part of data extracted from sensors. In this paper, we describe how we create a room, and the methods used to produce different resolution visualizations. In Section 2, we introduce related work on particle systems and LOD. In Section 3, we expose our solution to simplify the particle system. In Section 4 we give some results and finally, in Section 5 we present our conclusions and future work.
|
| 20 |
+
|
| 21 |
+
# 2. RELATED WORK
|
| 22 |
+
|
| 23 |
+
In this section we present several previous works concerning data visualization, particle systems and level of detail methods.
|
| 24 |
+
|
| 25 |
+
Some previous works present solutions to visualize large data flows extracted from mantle convection. M. Damon et al. $^{2}$ and K. E. Jordan et al. $^{3}$ present interactive viewers for this kind of data. These data are computed by using High Performance Computing (HPC) and visualized on a large display. The rendering is calculated by using another HPC. The data flow is very important and a real-time 3D simulation is hard to obtain. W. Kapferer and
|
| 26 |
+
|
| 27 |
+
T. Riser<sup>6</sup> introduce how to use particle system to visualize astronomic simulation, particles representing space objects. The number of particles is extremely important for computing motion in real-time. GPU computing is preferred to render instead of a common HPC solution. To display their data, they have developed their own 3D graphical engine. The space objects are represented by point sprite instead of sphere. Lights are used to give a spherical aspect to the point sprite. This solution allows to render more stars than spherical object method. The 3D engine provides different rendering methods to group space objects: cell simplification or extraction of isosurface. The use of GPU seems quite well for a particle solution, parallel processing allows to render large data; the astrological data seems to be well suited.
|
| 28 |
+
|
| 29 |
+
In 1976, J. Clark introduced the Level Of Detail (LOD) concept<sup>1</sup>. LOD consists of several resolution meshes used at different distances from the camera. At first, these meshes were produced by a designer. Among the first algorithms, in 1992, Schroeder et al. developed a decimation method to simplify the mesh<sup>7</sup>. It analyses mesh geometry and evaluates the complexity of triangles. Vertices are removed only if constraints set by the user are respected. Vertices are removed and gaps are filled using triangulation. These simplification algorithms are not enough to simplify a mesh efficiently because the shape is not always totally respected. D. Luebke, in 1997, proposed a taxonomy of mesh simplification<sup>8</sup>. He presented the most used algorithms and extracted different ways to use each algorithm. But in this paper, only one solution works with volumetric meshes<sup>9</sup>. T. He et al. propose a method based on voxel simplification by using a grid for clustering voxels. A marching cubes<sup>10</sup> algorithm was applied to produce a surface mesh. But this simplification algorithm did not preserve the shape of the mesh. In our work, we look for point cloud simplification. Indeed, previous methods which deal with simplification for surface point clouds like<sup>11-13</sup> are not adapted to our case. All of these methods produce LOD for surface meshes and point clouds extracted from scanners.
|
| 30 |
+
|
| 31 |
+
# 3. PROPOSED APPROACH
|
| 32 |
+
|
| 33 |
+
This section presents the different methods that are used to visualize a kind of data from Green Data Center (GDC). The main goal is to be able to visualize in real-time the evolution of temperature in the data center. For this, we use a special particle method. Particles are located using a segmentation algorithm based on Voronoi cell extraction and Delaunay triangulation. The latency due to the large flow of particles is avoided by using a client server paradigm. We improve our solution by using LOD methods to simplify rendering.
|
| 34 |
+
|
| 35 |
+
# 3.1 Particle systems
|
| 36 |
+
|
| 37 |
+
Rooms are the bases of our study. For modeling a room, we extract the shape of the space representation which is composed by a box with three measures: length $(l \in \mathbb{R})$ , width $(w \in \mathbb{R})$ , height $(h \in \mathbb{R})$ . Sensors are represented by $S = \{\mathrm{S}_1, \dots, \mathrm{S}_M\}$ , where $M$ is the number of sensors. Sensors $\mathrm{S}_i (\mathrm{i} \in \{1, \dots, M\})$ are placed on the space on a layer $\mathbf{L} \in \mathbb{N}$ and have a location represented by: $\{\mathbf{X}_i, \mathbf{Y}_i, \mathbf{L}_j\}$ with $\mathbf{X}_i \in \mathbb{R}$ , $\mathbf{Y}_i \in \mathbb{R}$ and $j$ is the layer used. For modeling the space inside a room, we use a particle system instead of 2D map representations which have some lacks. $^{14}$ Actually 2D map does not allow having a real visualization of space. A particle visualization gives a better efficiency for modeling space. We use a large number of particles to represent the entire space. $\mathbf{N} \in \mathbb{N}$ represents the number of particles in the room. It can be calculated using:
|
| 38 |
+
|
| 39 |
+
$$
|
| 40 |
+
N = \frac {\left(\left(l + 1\right) \times (h + 1) \times (w + 1)\right)}{\delta^ {3}} \tag {1}
|
| 41 |
+
$$
|
| 42 |
+
|
| 43 |
+
where $\delta \in \mathbb{R}$ is the space between particles. The particle grid is regular. In this model, three layers of temperature sensors compose rooms. They are defined according to their real locations in the data center. Figure ?? presents the different layers of sensors in the data center.
|
| 44 |
+
|
| 45 |
+
Particles carry information, and flow motion can be simulated if needed by changing the value of particles and the computational cost is inferior.
|
| 46 |
+
|
| 47 |
+
# 3.2 Segmentation algorithms
|
| 48 |
+
|
| 49 |
+
In our solution, each sensor has an influence on surrounding particles. To calculate the set of particles in the sensor range, we use two methods: Voronoi cell extraction and Delaunay triangulation.
|
| 50 |
+
|
| 51 |
+
Voronoi cells are a method to extract a partition of space $^{15}$ . This method is available for $\phi$ dimensions where $\phi \in [1, +\infty)$ , but most implementations are done in 2D. Tools for extracting 3D Voronoi diagrams exist: Voro++ and QHull. But particles are discrete and these solutions are not suitable because they extract the Voronoi diagram in a continuous way. Therefore we designed our own method based on sphere expansion. We search the nearest sensors for each particle. This part allows us to weight particles outside the sensor mesh. A second method is used to weight the interior of the sensor mesh. We extract the tetrahedral mesh of sensors using the Delaunay triangulation implemented in QHull. This method was used to analyze the location of particles. We compute the exact location using ray tracing on the soup of tetrahedra. First, we search the nearest particles inside the hull of each tetrahedron. We extract the normal of each face of the tetrahedron and we apply these normals on each particle. If the ray cuts three faces or more, the particle is inside the tetrahedron. This method is cost expensive and done in preprocessing. Moreover, particles are static and their positions do not need to be updated.
|
| 52 |
+
|
| 53 |
+
# 3.3 Client server paradigm
|
| 54 |
+
|
| 55 |
+
To improve computation, a client server paradigm is used. We define a low cost communication protocol to transfer data from a server to a client. Server computes the modification of particles and the client displays the results. This protocol works in five steps. These steps are: sending header, sending sensor data, sending particle data, sending footer and receiving acknowledgment/language command from client. At each step, the server waits the acknowledgment from the client. We develop two ways to send data. The first sends the entire point cloud (sensors and particles). The biggest problem of this method is the
|
| 56 |
+
|
| 57 |
+
transmission of data. Sensors are sent with their coordinates and their value. We encode these data in bit words. For the particles data, the same method was used. The footer was sent for closing the communication. The second method is used to reduce efficiently the communication cost. We only send modified sensors and particles. The id and the new value is sent instead of coordinates. The last step is the command sent by the client. It allows the user to interact with the server. We use it to modify the camera viewpoint.
|
| 58 |
+
|
| 59 |
+
# 3.4 Level of detail for particles
|
| 60 |
+
|
| 61 |
+
Level of detail (LOD) is one of the most important methods in computer graphics. It allows to solve rendering problems or performance problems. This method consists of producing several resolutions of a 3D object. In our work, we use some features to define the object resolution: hardware and viewpoint. Hardware and viewpoint do not need the same data structure and we need to recompute it for each modification of the viewpoint or when hardware changes. LOD is defined by two problem statements. The first one uses a sample of original points, the second one uses a new point data set. In this part, we define six methods to produce LOD. The first four methods are for the client, the others are for the server.
|
| 62 |
+
|
| 63 |
+
# Problem statement:
|
| 64 |
+
|
| 65 |
+
For these two approaches, we have a set of $\omega$ vertices $V$ , $V = \{V_1, \ldots, V_\omega\}$ . Each vertex is defined in $\mathbb{R}^3$ . Simplifying a mesh using a vertex sample means $\omega > \omega_2$ , where $\omega_2$ is the size of the second data set. For approach 1, we obtain a new object $\mathrm{V2} = \{\mathrm{V2}_1, \ldots, \mathrm{V2}_{\omega_2}\}$ with fewer points than V, where V2 is a subset of V. For approach 2, we obtain a new object $\mathrm{V3} = \{\mathrm{V3}_1, \ldots, \mathrm{V3}_{\omega_2}\}$ with fewer points than V, but each point in V3 is a new vertex.
|
| 66 |
+
|
| 67 |
+
In Section 2 we have presented methods to produce simplification. A few were designed for volumetric simplification. In this section, we propose several methods to produce different volumetric simplifications on our client. We develop four approaches to simplify 3D objects: clustering, neighbor simplification and two approaches based on the server. The clustering method is based on the work of He et al. $^{9}$ ; it consists of clustering particles using a 3D grid. Cell sizes of the grid are set depending on the viewpoint of the camera. Clusters are weighted with the average of the different values of particles. The position is the barycenter of these particles. Figures 1(a)-1(e) give some examples of simplification using the clustering solution. Figure 1(a) presents the original point cloud mesh. Figure
|
| 68 |
+
|
| 69 |
+
1(b) and 1(d) give two different methods for clustering. And finally, Figure 1(c) and 1(e) give the results of clustering methods.
|
| 70 |
+
|
| 71 |
+
Figure 1. Clustering method for simplification point cloud.
|
| 72 |
+

|
| 73 |
+
The second solution used is based on neighborhood extraction. Before runtime, we extract all neighbors of a particle. We measure the distance between each particle. Some optimization can help to decrease complexity: we can estimate easily in our structure which particle is closer to another one (using the fact that particle grid is regular). After this,
|
| 74 |
+
|
| 75 |
+
we extract the main value of particles. We explore each neighbor of particles and we keep the most important. In some cases, the most important can be the high values, in other the low values and in other both of them. This solution is able to produce a low resolution model with the most important information structure. Several low resolution models are created by exploring deeper in neighborhood. Figures 2(a)-2(c) illustrate a neighbor, and two simplifications of this mesh.
|
| 76 |
+
|
| 77 |
+

|
| 78 |
+
(a) Neighborhood cloud.
|
| 79 |
+
|
| 80 |
+

|
| 81 |
+
(b) Simplification neighborhood of 1.
|
| 82 |
+
|
| 83 |
+

|
| 84 |
+
(c) Simplification neighborhood of 2.
|
| 85 |
+
Figure 2. Neighbor method for simplification.
|
| 86 |
+
|
| 87 |
+
Other methods are based on the server instead of the client. The client sends its viewpoint to the server via a TCP connection. The server recomputes the particles structure and recreates the entire structure. With this solution, it is possible to produce a point cloud resolution depending on hardware. Figure 3(a) presents particles rendering with a distance of 2 from the camera. Figure 3(b) is the decimation produced with a distance of 3 and Figure 3(c) is a distance of 1.
|
| 88 |
+
|
| 89 |
+
Another method was based on Voronoi diffusion of temperature. The bandwidth for transmitting data is limited. We developed Voronoi temperature diffusion to solve this communication. In this approach, we update data using sphere expansion. Each time, we update particles depending on their distance from sensors. The more particles are distant from sensors the later they will be refreshed. This method sends only modified particles. The bandwidth is saved and the visualization gives a flow effect. Figure 4(a) represents values at time 0. At time 1, values of sensors change, 4(b). After time 2, we update a first range of particles 4(c) and finally the second range 4(d).
|
| 90 |
+
|
| 91 |
+

|
| 92 |
+
(a) Particles server $(\mathrm{D} = 2)$
|
| 93 |
+
Figure 3. Particle simplification using server and distance.
|
| 94 |
+
|
| 95 |
+

|
| 96 |
+
(b) Particles produce server $(\mathrm{D} = 3)$
|
| 97 |
+
|
| 98 |
+

|
| 99 |
+
(c) Particles produce by server $(\mathrm{D} = 1)$
|
| 100 |
+
|
| 101 |
+

|
| 102 |
+
(a) Particles and sensors (T = 0).
|
| 103 |
+
|
| 104 |
+

|
| 105 |
+
(b) Sensors update $(\mathrm{T} = 1)$ .
|
| 106 |
+
|
| 107 |
+

|
| 108 |
+
(c) First range $(\mathrm{T} = 2)$ .
|
| 109 |
+
Figure 4. Simplification using bandwidth size.
|
| 110 |
+
|
| 111 |
+

|
| 112 |
+
(d) Second range $(\mathrm{T} = 3)$ .
|
| 113 |
+
|
| 114 |
+
# 4. EXPERIMENTAL RESULTS
|
| 115 |
+
|
| 116 |
+
The data are extracted from two rooms of the IBM data center. Firstly, we present our method for rendering the room, and later we present our results using Level Of Detail methods.
|
| 117 |
+
|
| 118 |
+
# 4.1 Data visualization
|
| 119 |
+
|
| 120 |
+
We want to visualize and manage the consumption of a data center. For the visualization, we want to use an IFC viewer. But the IFC model for the GDC is not available yet. Data center extraction of the room space is for the moment done by hand. The room is empty and is represented by a simple box shape with 4 meters length, 3 meters width and 2.5 meters height. We use point cloud visualization based on the particle paradigm. We use the two rooms of the data center and we put the same number of particles (30000) and 35 sensors distributed on three layers: at 1 meter, at 2 meters and on the ground. We define high and low temperature regarding the real sensor values. Figure 5(a) presents the temperature color scale, Figure 5(b) and Figure 5(c) present data center sensors.
|
| 121 |
+
|
| 122 |
+
The next step is to interpolate data from sensors. For this, we extract the sensor mesh. We use QHull to produce a soup of tetrahedra. Particles need to be located. We can determine which tetrahedron is the nearest: we extract the bounding box hull of the tetrahedron and we apply for each particle the normals of each tetrahedron face. If these rays cut three or more faces, then the particle is inside the tetrahedron. With this method, we can determine exactly the location of each particle with respect to the tetrahedra, and a weight is given to them easily. It is used to apply a coefficient to the value of each vertex of the tetrahedron. For the outside particles, another solution is used: Voronoi cells. This method is based on a discrete extraction of Voronoi cells. We use our own method because other methods like Voro++ or QHull extract the Voronoi diagram in a continuous way.
|
| 123 |
+
|
| 124 |
+

|
| 125 |
+
|
| 126 |
+

|
| 127 |
+
(b) Room one.
|
| 128 |
+
|
| 129 |
+

|
| 130 |
+
(c) Room two.
|
| 131 |
+
|
| 132 |
+
# 4.2 Level of details
|
| 133 |
+
|
| 134 |
+
In the early days of this project, the first solution proposed gave a low frame rate, about 15 FPS (Frames Per Second): visualization was not in real-time (real-time is about 24 FPS). To solve this problem, we define a client server paradigm. This solution allows to produce a real-time rendering on the client. Figure ?? gives an example of LOD for particles. We use OpenSceneGraph $^{20}$ as a 3D engine. It owns several features useful for LOD. A special object is defined to manage the multi-resolution model. It calculates the distance of the object from the camera. For our experimentation we use five resolutions of mesh. The first mesh is the original mesh, it is set at 0 to 500. The next mesh is set at 500 to 1000, the next at 1000 to 1500 and the other at 1500 to 2000. These meshes were constructed by specific LOD methods: clustering and significant vertices. Clustering defines a 3D grid inside the room. The size of each cell depends on the viewpoint location. The size of the cluster depends on the visibility of the clustered particles. First results are given in Figures 6(a) and 6(b). The value of a cluster is an average of the clustered values. The number of points of the final mesh depends on the grid size. Table 1 shows the results at several distances.
|
| 135 |
+
|
| 136 |
+
<table><tr><td></td><td>D = 0 to 500</td><td>D = 500 to 1000</td><td>D = 1000 to 1500</td><td>D = 1500 to 2000</td></tr><tr><td>C = X</td><td>30000</td><td>3900</td><td>240</td><td>36</td></tr></table>
|
| 137 |
+
|
| 138 |
+

|
| 139 |
+
Figure 5. Data use to model the system.
|
| 140 |
+
(a) $\mathrm{D} = 500$ to 1000.
|
| 141 |
+
Figure 6. Clustering visualization algorithms.
|
| 142 |
+
|
| 143 |
+

|
| 144 |
+
Table 1. Results of clustering simplification.
|
| 145 |
+
(b) $D = 1000$ to 1500.
|
| 146 |
+
|
| 147 |
+
Significant points method extracts the neighbors for each particle. We extract the highest and lowest temperatures, by exploring the neighborhood of a particle, in order to have significant vertices of the model. For the first step of simplified model we explore neighbor. For the second model, we explore neighbor and neighbor of neighbor, etc. This solution simplifies drastically the model. First results are given Figure ??-??. Table 2 shows the number of vertices at several distance.
|
| 148 |
+
|
| 149 |
+
<table><tr><td></td><td>D = 0 to 500</td><td>D = 500 to 1000</td><td>D = 1000 to 1500</td><td>D = 1500 to 2000</td></tr><tr><td>C = X</td><td>30000</td><td>22950</td><td>4554</td><td>3524</td></tr></table>
|
| 150 |
+
|
| 151 |
+

|
| 152 |
+
(a) Neighborhood 1.
|
| 153 |
+
Figure 7. Clustering visualization algorithms using neighbor.
|
| 154 |
+
|
| 155 |
+

|
| 156 |
+
(b) Neighborhood 2.
|
| 157 |
+
|
| 158 |
+
The first server solution receives orders from client as presented Section 3.4. We calculate the viewpoint distance and we send data according to it. A new structure is recalculated if the camera is too far from the object. After the recomputing, we send the new data. This solution allows the user to receive more or less data according to its distance to the object. Table 3 shows some different resolutions produced with this method.
|
| 159 |
+
|
| 160 |
+
Table 2. Results of neighbor simplification.
|
| 161 |
+
|
| 162 |
+
<table><tr><td></td><td>D = 0 to 500</td><td>D = 500 to 1000</td><td>D = 1000 to 1500</td><td>D = 1500 to 2000</td></tr><tr><td>C = X</td><td>120000</td><td>30000</td><td>7500</td><td>1875</td></tr></table>
|
| 163 |
+
|
| 164 |
+
Table 3. Several resolution of model.
|
| 165 |
+
|
| 166 |
+
Another solution is to use bandwidth latency. We send data at several times, we do not send the entire set of data but only modified particles. We send at first time the sensors data, and subsequently we send a range of data (the nearest). After few minutes, all data are sent. This solution gives good results, and simulates a thermal diffusion in the whole structure of particles. Figure 8(a)-8(c) illustrate this method.
|
| 167 |
+
|
| 168 |
+

|
| 169 |
+
(a) $\mathrm{T} = 0$
|
| 170 |
+
|
| 171 |
+

|
| 172 |
+
(b) $\mathrm{T} = 1$
|
| 173 |
+
|
| 174 |
+

|
| 175 |
+
(c) $\mathrm{T} = 4$
|
| 176 |
+
|
| 177 |
+
# 5. CONCLUSION
|
| 178 |
+
|
| 179 |
+
In this paper, we have presented a method to visualize sensor data extracted from a Green Data Center. This approach produces an interpolation visualization for managing and visualizing data. This interpolation uses a Delaunay triangulation and a cell extraction based on Voronoi. An unusual way of using particles helps to process data. First results present the solution proposed to visualize the inside of a GDC space. The second results proposed in this paper aim to improve the rendering.
|
| 180 |
+
|
| 181 |
+
For this, a first step introduces a client/server protocol; a second step illustrates methods to simplify the model. With these different approaches we improve the rendering time while the most important data are kept. In future work, we will work on data "dressing". We want to find a way to improve the rendering of the scene using metaballs or marching cubes algorithms. A main constraint of this work is real-time computation. Future work also concerns adding rooms to the visualization. At present, we only visualize a single room. We want to visualize buildings, and complex forms, by using an IFC loader.
|
| 182 |
+
|
| 183 |
+
# ACKNOWLEDGMENTS
|
| 184 |
+
|
| 185 |
+
We want to thank the PSSC (Products and Solutions Support Center) team of IBM Montpellier for having provided the necessary equipment and data needed for this experimentation. And we thank the FUI (Fonds Unique Interministériel) for their financial support.
|
| 186 |
+
|
| 187 |
+
# REFERENCES
|
| 188 |
+
|
| 189 |
+
[1] Clark, J. H., "Hierarchical geometric models for visible surface algorithms," Communications of the ACM 19(10), 547-554 (1976).
|
| 190 |
+
[2] Damon, M., Kameyama, M., Knox, M., Porter, D., Yuen, D., and Sevre, E., "Interactive visualization of 3d mantle convection," Visual Geosciences (2008).
|
| 191 |
+
[3] Jordan, K. E., Yuen, D. A., Reuteler, D. M., Zhang, S., and Haimes, R., "Parallel interactive visualization of 3d mantle convection," IEEE Comput. Sci. Eng. 3(4), 29-37 (1996).
|
| 192 |
+
[4] Reeves, W. T., "Particle systems - a technique for modeling a class of fuzzy objects," ACM Transactions on Graphics 2, 359-376 (1983).
|
| 193 |
+
[5] Latta, L., "Building a million particle system," (2004).
|
| 194 |
+
[6] Kapferer, W. and Riser, T., "Visualization needs and techniques for astrophysical simulations," New Journal of Physics 10(12), 125008 (15pp) (2008).
|
| 195 |
+
[7] Schroeder, W. J., Zarge, J. A., and Lorensen, W. E., "Decimation of triangle meshes," 65-70 (1992).
|
| 196 |
+
[8] Luebke, D., "A survey of polygonal simplification algorithms," (1997).
|
| 197 |
+
[9] He, T., Hong, L., Kaufman, A., Varshney, A., and Wang, S., "Voxel based object simplification," in [Proc. SIGGRAPH Symposium on Interactive 3D Graphics], 296-303 (1995).
|
| 198 |
+
[10] Lorensen, W. E. and Cline, H. E., "Marching cubes: A high resolution 3d surface construction algorithm," SIGGRAPH Comput. Graph. 21(4), 163-169 (1987).
|
| 199 |
+
[11] Pauly, M., Gross, M., and Kobbelt, L. P., "Efficient simplification of point-sampled surfaces," (2002).
|
| 200 |
+
[12] Moenning, C., , Moenning, C., and Dodgson, N. A., "Intrinsic point cloud simplification,"
|
| 201 |
+
|
| 202 |
+
(2004).
|
| 203 |
+
[13] Song, H. and Feng, H.-Y., "A progressive point cloud simplification algorithm with preserved sharp edge data," The International Journal of Advanced Manufacturing Technology 45, 583-592 (November 2009).
|
| 204 |
+
[14] Buschmann, C., Pfisterer, D., Fischer, S., Fekete, S. P., and Kröller, A., "Spyglass: a wireless sensor network visualizer," SIGBED Rev. 2(1), 1-6 (2005).
|
| 205 |
+
[15] Avis, D. and Bhattacharya, B., "Algorithms for computing d-dimensional voronoi diagrams and their duals," 1, 159-180 (1983).
|
| 206 |
+
[16] Rycroft, C. H., "Voro++: a three-dimensional voronoi cell library in $c++$ ," Chaos 19 (2009). Lawrence Berkeley National Laboratory.
|
| 207 |
+
[17] Barber, C. B., Dobkin, D. P., and Huhdanpaa, H., "The quickhull algorithm for convex hulls," ACM Trans. Math. Softw. 22(4), 469-483 (1996).
|
| 208 |
+
[18] Snyder, J. M. and Barr, A. H., "Ray tracing complex models containing surface tessellations," SIGGRAPH Comput. Graph. 21(4), 119-128 (1987).
|
| 209 |
+
[19] Hoppe, H., "Progressive meshes. computer graphics," SIGGRAPH96 Proceedings, 99108 (1996).
|
| 210 |
+
[20] Burns, D. and Osfield, R., "Open scene graph a: Introduction, b: Examples and applications," 265 (2004).
|
| 211 |
+
|
| 212 |
+
Further author information:
|
| 213 |
+
|
| 214 |
+
Lange B.: E-mail: benoit.lange@lirmm.fr
|
| 215 |
+
|
| 216 |
+
Rodriguez N.: E-mail: nancy.rodriguez@lirmm.fr
|
| 217 |
+
|
| 218 |
+
Puech W.: E-mail: william.puech@lirmm.fr
|
| 219 |
+
|
| 220 |
+
Rey H.: E-mail:REYHERVE@fr.ibm.com
|
| 221 |
+
|
| 222 |
+
Vasques X.: E-mail: xaviervasques@fr.ibm.com
|
data/2025/2503_09xxx/2503.09198/images/0d4e22f04d3ea25761125c93fc6d70fe856bf840ef353c043f32d21c9633724e.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/0e41ba0aeddc8af5f2b6664c64abea63f9b32bfd4692c38f916d07f501d537e5.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/23944f9963b290146f2ac445e8970545bcf00b2949e999067a13468653d749cc.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/2eb0e7b79700cbbd72a8a244992163f567dbbd68f9d3db6bad3e5808cda4dabb.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/2fcee6efa3b1d3c44144e2639f3857a15c4079f838c4f9132953fd427a8b1be9.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/389ef0f63a90171c14db5cd9a925792e376222c8c56a5801cacb3dcc99360c96.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/3cd8a951d8771e165427c2faca0d2508e4c8cef94fb244dc3a04f2ca031996a3.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/4050e8f862ac40be7bc6d5c239997192325936e244911b8d69fe60a4ab8810b8.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/416dbc943d118d6834e80c8c5ad759b34f940700b96fd0b87bb23c082f10cf10.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/5dabdfa6b0129921b2abb27785be9608573a25f26e4646d20c6b92250fad1414.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/69ae709df8988787ff641b5b6eb5a608ca0636bba7110a3a4735064168d117da.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/7c729f1706460b0bf0adde7a8dbd071f9ae77f611ef3f85b3d951fb62795016c.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/7eac2440a2fedd66d943c82b390c3a9df5d95d019d523a29c7a3e2f9c7218928.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/8c16ad7a59f34e5974a4322466cec0149f8905a1a1b1cfadff910a3fec50d004.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/92c09c0b767ea453098777786fb815e8c2f833a8abc790807397f42e9a6f6887.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/9434d9911bf9b1660b16b84b9672b9a68ac66eec53e61ed383a2c752f12025a2.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_09xxx/2503.09198/images/96bce3148486d00ea374e73aeb6caf0bb16a8ee9cc4e78a0b161de5fdfe93db0.jpg
ADDED
|
Git LFS Details
|