Add files using upload-large-folder tool
- 20240819/2212.02495v4.json +315 -0
- 20240819/2303.04238v5.json +0 -0
- 20240819/2303.12341v2.json +0 -0
- 20240819/2305.03515v7.json +0 -0
- 20240819/2306.12341v2.json +0 -0
- 20240819/2307.10230v4.json +0 -0
- 20240819/2307.11091v2.json +60 -0
- 20240819/2309.05919v2.json +0 -0
- 20240819/2402.01975v3.json +0 -0
- 20240819/2403.06009v3.json +0 -0
- 20240819/2403.15760v2.json +0 -0
- 20240819/2403.18198v2.json +439 -0
- 20240819/2404.09937v2.json +0 -0
- 20240819/2404.12965v2.json +203 -0
- 20240819/2405.11715v2.json +167 -0
- 20240819/2405.12535v2.json +485 -0
- 20240819/2406.13213v2.json +0 -0
- 20240819/2407.15622v2.json +154 -0
- 20240819/2407.19965v2.json +190 -0
- 20240819/2408.05884v2.json +101 -0
- 20240819/2408.07967v2.json +578 -0
- 20240819/2408.08673v2.json +154 -0
- 20240819/2408.09624v1.json +557 -0
- 20240819/2408.09657v1.json +0 -0
- 20240819/2408.09702v1.json +0 -0
- 20240819/2408.09722v1.json +0 -0
- 20240819/2408.09727v1.json +102 -0
- 20240819/2408.09735v1.json +0 -0
- 20240819/2408.09756v1.json +249 -0
- 20240819/2408.09765v1.json +571 -0
- 20240819/2408.09773v1.json +136 -0
- 20240819/2408.09792v1.json +105 -0
- 20240819/2408.09819v1.json +0 -0
- 20240819/2408.09842v1.json +293 -0
- 20240819/2408.09912v1.json +0 -0
- 20240819/2408.09936v1.json +22 -0
- 20240819/2408.09954v1.json +226 -0
- 20240819/2408.09962v1.json +301 -0
- 20240819/2408.09972v1.json +132 -0
- 20240819/2408.10002v1.json +0 -0
- 20240819/2408.10043v1.json +126 -0
- 20240819/2408.10086v1.json +462 -0
- 20240819/2408.10153v1.json +365 -0
- 20240819/2408.10181v1.json +0 -0
- 20240819/2408.10285v1.json +242 -0
- 20240819/2408.10351v1.json +93 -0
- 20240819/2408.10383v1.json +0 -0
- 20240819/2408.10384v1.json +441 -0
- 20240819/2408.10414v1.json +257 -0
- 20240819/2408.11868v1.json +0 -0
20240819/2212.02495v4.json
ADDED
@@ -0,0 +1,315 @@
{
"title": "Balanced Binary Tree Schemes for Computing Zernike Radial Polynomials",
"abstract": "Zernike radial polynomials (ZRP) play a significant role in application areas such as optics design, imaging systems, and image processing systems. Currently, there are two kinds of numerical schemes for computing the ZRP automatically with computer programs: one is based on the definition in which the factorial operations may lead to the overflow problem and the high order derivatives are troublesome, and the other is based on recursion which is either unstable or with high computational complexity. In this paper, our emphasis is focused on exploring the balanced binary tree (BBT) schemes for computing the ZRP: firstly an elegant formulae for computation is established; secondly the recursive and iterative algorithms based-on BBT are proposed; thirdly the computational complexity of the algorithms are analyzed rigorously; finally the performance of BBT schemes by testing the running time is verified and validated. Theoretical analysis shows that the computational complexity of balanced binary tree recursive algorithm (BBRTA) and iterative algorithm are exponential and quadratic respectively, which coincides with the running time test very well. Experiments show that the time consumption is about microseconds with different computation platforms for the balanced binary tree iterative algorithm (BBTIA), which is stable and efficient for real-time applications. In the sense of STEM education, the connection of the BBT and ZRP exhibits the beauty and applications of discrete mathematical structure behind the engineering problem, which is worthy of introducing to the college students, computer programmers and optics engineers.\nKeywords:\nOptics design, Zernike radial polynomials (ZRP), High-precision computation, Balanced binary tree (BBT), Recursion and Iteration, Computational complexity, Real-time application",
"sections": [
{
"section_id": "1",
"parent_section_id": null,
"section_name": "Introduction",
"text": "The Zernike radial polynomials (ZRP), named after Frits Zernike, are important for atmospheric turbulence analysis[1 ###reference_b1###], aberration analysis in imaging system and optics design [2 ###reference_b2###, 3 ###reference_b3###, 4 ###reference_b4###, 5 ###reference_b5###, 6 ###reference_b6###, 7 ###reference_b7###, 8 ###reference_b8###, 9 ###reference_b9###, 10 ###reference_b10###, 11 ###reference_b11###, 12 ###reference_b12###], and image processing [13 ###reference_b13###]. Mathematically, ZRP are a sequence of orthogonal polynomials which are defined on the unit disk and derived from the pupils of imaging system. Generally, for the radial degree and azimuthal order such that and is even, the ZRP with double indices are denoted by [9 ###reference_b9###, 10 ###reference_b10###, 11 ###reference_b11###]\nin which the parameter for the number of terms is\nand the coefficients can be expressed by\nwhere\nis the binomial coefficient.\nThe symmetry characterized by\nimplies that it is sufficient to consider the non-negative for computing the value of . It is easy to find that it is not wise for us to compute directly based on the definition with the equation (1 ###reference_###) since the factorial operations may lead to the overflow problem and the high order derivatives are troublesome in practical computations and/or implementations with computer programs.\nIn the past decades of exploring the indirect computation methods for , there are several recursive methods to compute . In 1976, Kintner [14 ###reference_b14###] proposed the -recursive formulae\nwhere\nHowever, the stopping condition is unknown at that time and the formula is singular when .\nIn 1989, Prata and Rusch [15 ###reference_b15###] proposed the following recursive scheme\nwith the coefficients\nand stopping condition\nThere are two significant points for the stopping condition:\nit has significant impacts on the time complexity and space complexity of the computation;\nthe choice of stopping condition is not unique, which leads to different performance for the numeric schemes adopted for the objective of computation.\nFor computing the , Chong et. al. proposed a simple stopping condition for recursive process in 2003 [13 ###reference_b13###]\nIn 2013, Shakibaei and Paramesran [11 ###reference_b11###] reformulated the recursive relation in (8 ###reference_###) by\nand derived an alternative recursive scheme\nwith the stopping condition\nvia the properties of Chebyshev polynomials of the second kind.\nHowever, the computation process is rather slow with this recursive method. Chong et al. [13 ###reference_b13###] proposed the following -recursive scheme\nin which and are functions of and .\nThis -recursive scheme is more efficient than the other recursive schemes for computing . However, is a singular point in (15 ###reference_###) although is regular for all . Thus the computation will be unstable if is small enough.\nIn computer science, we know that the essence of recursion lies in two facts: there must be a stopping condition for the recursive procedure/function which calls itself; the recursion depth should not be large otherwise the computational complexity will be too large due to the massive memory consumption and long time consumption caused by the push-pop stacking processes.\nFor the available recursive schemes of computing at present, for large and difference of , the computation complexity of these recursive schemes is rather high which limits their applications. 
Generally, for the recursive problem with single integer as argument, it is easy to convert the recursive formulae to a more efficient iterative counterpart. However, for the with two integers and as arguments, there is a lack of feasible method to convert the recursive formula to iterative versions. Although Kintner\u2019s -recursive formula (6 ###reference_###) can be reformulated as an iterative formula, it is limited for ; Chong\u2019s -recursive scheme (15 ###reference_###) can be converted to its iterative version, however the singular point will still exist. For the coupled recursive formulae (8 ###reference_###) (or (12 ###reference_###) equivalently) and (13 ###reference_###), their iterative implementations are still to be explored.\nFor the computational complexity of the recursive algorithms available,\nShakibaei and Paramesran [11 ###reference_b11###] considered the time complexity by counting the number of addition and multiplication operations. However, their conclusion is arguable for some reasons: the space complexity of computation is ignored, the running time is not tested and the complexity just depends on the radial index instead of the double indices and . It should be noted that the space complexity of recursive algorithm is usually exponential or more higher. In consequence, the recursive schemes are not suitable for real-time applications.\nIn this paper, our objective is to explore an elegant formula to compute stably and propose novel recursive and iterative schemes with the help of balanced binary tree (BBT) structure. Our main contributions lie in the following perspectives:\nA novel formula for computing the ZRP is proposed, which stimulates the recursive and iterative schemes for the numerical computation.\nThe BBT structure of the novel formula is discovered, which deepens the understanding of the ZRP.\nBoth the recursive and iterative algorithms for computing the ZRP are designed, which refreshes the state-of-the-art of the computational complexity.\nThe rest of this paper is organized as follows: Section 2 ###reference_### deals with the preliminaries for developing objective algorithms; Section 3 ###reference_### discusses the BBT recursive scheme; Section 4 ###reference_### concerns the BBT iterative scheme; Section 5 ###reference_### focuses on the verification and validation; finally Section 6 ###reference_### gives the conclusion for our work."
},
{
"section_id": "2",
"parent_section_id": null,
"section_name": "Preliminaries",
"text": ""
},
{
"section_id": "2.1",
"parent_section_id": "2",
"section_name": "A Special Kind of Difference Equation",
"text": "For the discrete difference equation\nwith initial value where are constants and , its solution is\nThis formula will be used in analyzing the computational complexity of computing ."
},
{
"section_id": "2.2",
"parent_section_id": "2",
"section_name": "Computing Powers of Real Number with Squaring",
"text": "For any and real number , the power can be computed fast by squaring, which is based on the following recursive formulae\nWith this formulae, for the power can be computed fast with the time complexity of since only times of multiplicative operation is required. However, for , is always , thus the complexity will be . The notations , ,\n, ,\n, , ,\n, \nand so on about the computational complexity are introduced in the appendix, please see Appendix A.2 ###reference_### for more details.\nThe iterative algorithm for computing is shown in Algorithm 2.2 ###reference_###.\nAlgorithm 1 Compute the power with squaring method in an iterative way\n\n\nObviously, the time complexity of Algorithm 2.2 ###reference_### is\nFor the purpose of computing the generalZRP , it is necessary to investigate the functions and since they can be used to generate formulae for as stopping/initial conditions for recursive/iterative processes. In the recursive tree, there are two types of leaf nodes: one is type A specified by , another is type B specified by .\nAlgorithm 2.2 ###reference_### and Algorithm 2.2 ###reference_### are used to compute the leaf nodes of type A with and type B with respectively.\nAlgorithm 2 Compute the value of a leaf node of type A with where .\n\n\nAlgorithm 3 Compute the value of a leaf node of type B with where .\n\n\nThe time flops for the leaf nodes of type A is given by\nAs a comparison, the time flops for the leaf nodes of type B can be expressed by\nTherefore, the computational complexity for the leaf nodes of type A and type B is always for or for when computing and with index and radius (or with index and radius )."
},
{
"section_id": "3",
"parent_section_id": null,
"section_name": "Recursive Scheme for Radial Polynomials",
"text": ""
},
{
"section_id": "3.1",
"parent_section_id": "3",
"section_name": "Computing the Radial Polynomials Recursively",
"text": "Our novel recursive formulae is a combination of the recursive schemes in [11 ###reference_b11###] and [15 ###reference_b15###]. The trick of the exploring is to reduce the difference of the up-down scripts appearing on the right hand side in (12 ###reference_###) and (13 ###reference_###) with a common constant so as to get a balanced result, see TABLE 1 ###reference_###. The larger the difference of the up-down scripts in is, viz. , the faster the recursive process is. For the two terms and in the right hand side of (12 ###reference_###), the reductions of the difference, namely , are and where is in the right hand side of recursive formula. Obviously, the reductions are not equal. Similarly, for the three terms in the right hand side of\n(12 ###reference_###), the reductions of the difference are ,\n and respectively. These quantities are also not balanced.\nIt should be noted that if the reduction of difference is , then it is slow for the recursive process to satisfy the stopping condition.\n###table_1### Let\nby multiplying with (13 ###reference_###) and eliminating the term in (12 ###reference_###), we immediately have\nEvidently, the differences of up-down scripts in and are the same, i.e.,\nIn this way, we can reduce the difference to or step by step with a common difference , and then the stopping condition (11 ###reference_###) can be applied naturally.\nIn conclusion, we can obtain the following complete recursive scheme\nFor practical computation, we can always set with at first. Before proceeding further, we give some interpretations about this recursive scheme:\nthe recursive process corresponds to a BBT as shown in FIGURE 1 ###reference_###;\nthe stopping conditions consists of two kinds of leaves:\ntype A, which corresponds to for ;\ntype B, which corresponds to for ;\nthe root node corresponds to the objective polynomial ;\ncomputing the values of the leaf nodes with type A and type B can be easily and fast performed with the stopping condition (11 ###reference_###);\nfor , the computation of corresponds to traversing the BBT and computing the value for each node in the tree according to (22 ###reference_###) and (23 ###reference_###) dynamically;\nthe number of levels of the nodes in the binary tree is equal to the parameter in (2 ###reference_###);\nthe number of nodes in the -th level is for and the total number of nodes is .\n###figure_1###"
},
{
"section_id": "3.2",
"parent_section_id": "3",
"section_name": "Balanced Binary Tree Recursive Algorithm (BBTRA)",
"text": "For the given recursive formulae and stopping condition, generally it is easy to design a recursive algorithm.\nWith the help of the preceding two procedures CalcLeafNodetypeA and CalcLeafNodetypeB specified by the stopping condition (11 ###reference_###), we can\ndesign the balanced binary tree recursive algorithm (BBTRA) as listed in Algorithm 3.2 ###reference_###.\nAlgorithm 4 Compute the radial function recursively\n\n\nIt should be noted that is the difference odd integers or even integers, which implies that . Therefore, there is no case 1 in Algorithm 3.2 ###reference_###."
},
{
"section_id": "3.3",
"parent_section_id": "3",
"section_name": "Computational Complexity of BBTRA",
"text": "For the non-negative double indices and such that , the recursive process will stop at the nodes of leaves when or ."
},
{
"section_id": "3.3.1",
"parent_section_id": "3.3",
"section_name": "3.3.1 Space Complexity of BBTRA",
"text": "The space complexity of computing is determined by the memories needed. For , we just need one variable to record the value of once the radius and integer is given, thus the corresponding space complexity is . However, for , all of the nodes in the binary tree determined by the recursive process will be kept for computing . Hence the storage is determined by the number of nodes of the BBT. As demonstrated by FIGURE 1 ###reference_###, the number of nodes is\nIn consequence, the space complexity is exponential, which increases very fast with the difference . In the sense of space complexity or memory consumption, the recursive scheme is not recommended for large value of . Particularly, for and , the space complexity is ."
},
{
"section_id": "3.3.2",
"parent_section_id": "3.3",
"section_name": "3.3.2 Time Complexity of BBTRA",
"text": "The crucial result in our development of time complexity will be the following fundamental equation\nfor the non-leaf nodes of the BBT, which simplifies the analysis of computational complexity in a surprising way. Let\nthen we have\nAccording to (20 ###reference_###) and (21 ###reference_###), for we can obtain\nand\nFor , simple algebraic calculations show that\nand\nFor the addition operations, equation (25 ###reference_###) implies that\nThus\nLet\nthen\nhence we have the discrete difference equation\nAccording to (16 ###reference_###) and (17 ###reference_###), the solution to (38 ###reference_###) must be\nHence we have time flops for the addition operation\nSimilarly, for the multiplication operations, we have\nIn this way, we immediately obtain the following discrete difference equation\nand the initial value\nWith the help of (16 ###reference_###), (17 ###reference_###), (42 ###reference_###) and (43 ###reference_###), we can deduce that\nTherefore,\nThe total time flops for computing is\nwhich can be expressed with the big-O notation as follows\nClearly, in the sense of time complexity or time consumption, the recursive scheme is not recommended for the large value of ."
},
{
"section_id": "4",
"parent_section_id": null,
"section_name": "Iterative Scheme for Computing the Zernike Radial Polynomials",
"text": "For the recursive Algorithm 3.2 ###reference_###, its space complexity is exponential and the time complexity varies with the configuration of double indices and according to (47 ###reference_###). For large value of , it is necessary for us to reduce the computational complexity with iterative scheme instead of recursive scheme for computing ."
},
{
"section_id": "4.1",
"parent_section_id": "4",
"section_name": "Computing the Radial Polynomials Iteratively",
"text": "The key issue of iterative computation lies in two points: firstly, we set the initial condition in an iterative scheme with the stopping condition in the corresponding recursive scheme; then we iterate by updating the state of nodes of interest via the primitive recursive formulae in a reverse direction. For the recursive problem with a single integer parameter, say , this can be done easily since there are just two directions for the variation of for the 1-dim problems. However, for the recursive problem with double integer parameters, say and , the iteration may be trouble because there is no simple direction for the updating process. Fortunately, for computing the ZRP, it is possible to iterate simply with the structure of BBT. Actually, what we should do is just accessing the nodes in BBT from the leaf nodes to the root node.\nFIGURE 2 ###reference_### shows the iterative process intuitively. For and , we start with the initial condition specified by\n(11 ###reference_###) in the leaves in the -th level, then compute the value of the upper nodes in the -th level according to (23 ###reference_###), finally update the state of the nodes. The formula in (25 ###reference_###) can be used for both recursion and iteration, which depends on the concrete\nusages. Moreover, it is obvious that there are some repetitive nodes in FIGURE 2 ###reference_###, which implies that we can save their calculation to accelerate the computation and reduce memories needed.\nFIGURE 3 ###reference_### demonstrates the simplification of the BBT with a 1-dim array to store the nodes of interest. By removing the redundant nodes, there are just independent leaves in the -th level of the BBT. We can use a sequence to store the initial values with the expression\nfor .\nWith the help of (23 ###reference_###), we can deduce that the iterative formulae for updating must be\nwhere denotes the -th iteration. It should be noted that the direction of iteration is from the bottom to top such that\nIn other words, the variable for iteration can be replaced by the index for the levels of the BBT. When , the iterative process stops and we have\nWhen the iteration ends, we obtain the byproduct stored in the sequence v:\nin which only is the computation result of and the others are discarded.\nIn the left part of FIGURE 3 ###reference_###, the computation of is illustrated intuitively where and . As a generalization, the right part of FIGURE 3 ###reference_### demonstrates the iterative process of computing for and .\n###figure_2### ###figure_3###"
},
{
"section_id": "4.2",
"parent_section_id": "4",
"section_name": "Balanced Binary Tree Iterative Algorithm (BBTIA)",
"text": "Although the principle of converting the recursive process to its iterative counterpart is intuitive, it is still necessary to present the iterative algorithm clearly for the purpose of implementation with computer programs. Algorithm 4.2 ###reference_### is the balanced binary tree iterative algorithm (BBTIA) for computing in an iterative way. We remark that the dynamic memory allocation in Algorithm 4.2 ###reference_###\nis necessary because the parameter is determined by the input double indices .\nAlgorithm 5 Compute the radial function iteratively"
},
{
"section_id": "4.3",
"parent_section_id": "4",
"section_name": "Computational Complexity of BBTIA",
"text": ""
},
{
"section_id": "4.3.1",
"parent_section_id": "4.3",
"section_name": "4.3.1 Space Complexity of BBTIA",
"text": "For and , equation (11 ###reference_###) can be used and only one variable is needed to store the value of , hence the space complexity is .\nFor , the sequence is used to realize the computation of . The number of auxiliary variables (i.e., and ) is constant, hence memory assumption is dominated by the length of the sequence v. In summary, the space complexity for computing is"
},
{
"section_id": "4.3.2",
"parent_section_id": "4.3",
"section_name": "4.3.2 Time Complexity of BBTIA",
"text": "For and , the time computational complexity is determined by the Line 5 and Line 8 in Algorithm 4.2 ###reference_### respectively. The time complexity for the leaf nodes of type X is\naccording to (19 ###reference_###) where X is A or B.\nFor and , the time computational complexity is determined by the operations in the loops involved in Lines in Algorithm 4.2 ###reference_###. Let and ignore other line which the time cost is not important for complexity analysis, we can obtain the addition flops\nsince and multiplication flops\nrespectively.\nConsequently, we have\nAs an illustration, for we have , thus we can obtain\nParticularly, for , we can obtain TABLE 3 ###reference_###. The time flops are also determined by the operations in the loops involved on Line in Algorithm 4.2 ###reference_###. With the help of TABLE 3 ###reference_###, an argument similar to the process for finding where shows that\nsince for non-negative .\nAs a comparison, TABLE 4 ###reference_### shows the computational complexity with the notation for computing the ZRP ."
},
{
"section_id": "5",
"parent_section_id": null,
"section_name": "Verification and Validation",
"text": "We implemented the novel recursive and iterative algorithms with the C programming language and compared the running time with Prata-Rusch\u2019s and Shakibaei-Paramesran\u2019s recursive methods. As an illustration, we have tested the practical running time for and respectively (where ) via an average value by repeating the algorithms of interest for 10 times.\n###figure_4### ###figure_5### As shown in FIGURE 4 ###reference_### from top to bottom, there are six curves of running time varying with the difference . Here we give some necessary interpretations:\nthe blue curve marked by circle and legend \"Our BBTIA\" corresponds to the running time of our BBTIA (Algorithm 4.2 ###reference_###), which is at the bottom in the subfigures;\nthe red curve marked by diamond and legend \"Our BBTRA\" corresponds to the running time of our BBTRA (Algorithm 3.2 ###reference_###), which looks like a straight line in the semilog diagram starting from in the subfigures since for the time complexity is exponential;\nthe dashed curve marked by upward-facing triangle and legend \"PrataRusch1989\" corresponds to the running time of the primitive Prata-Rusch\nrecursive scheme with the stopping condition (10 ###reference_###), which also looks like a horizontal line in the semilog diagram; it is over the straight line for our recursive Algorithm 3.2 ###reference_###, which implies the time computational complexity is much higher than the exponential complexity;\nthe blue curve marked by downward-facing triangle and legend \"PrataRuschBeta\" corresponds to the running time of the improved Prata-Rusch\nrecursive scheme with the stopping condition (11 ###reference_###),\nwhich reduces the time consumption for and remains the same with the counterpart of the primitive Prata-Rusch recursive scheme;\nthe dashed curve marked by square and legend \"ShakiParam2013\" corresponds to the running time of the primitive Shakebaei-Paramesran\nrecursive scheme with the stopping condition (14 ###reference_###)\nthe blue curve marked by hexagram and legend \"ShakiParamBeta\" corresponds to the running time of the improved Shakebaei-Paramesran\nrecursive scheme with the stopping condition (11 ###reference_###).\nFIGURE 4 ###reference_### shows clearly that\nthe running time for our iterative algorithm is the shortest since its time computational complexity is just and\nthe semilog curve for the running time for our recursive algorithm looks like a straight line, which\ncorresponds to the exponential complexity and coincides with the theoretical analysis above very well.\nFIGURE 4 ###reference_### also illustrates a quantitative result that for (i.e., or ): the worst case of time consumption for computing is about microseconds ( seconds) with recursive schemes, our BBTIA works very well and the time consumption is about microseconds, which implies that it is suitable for real-time applications.\nThe trends of curves in FIGURE 4 ###reference_### show that the time consumed increases with the growth of when and remains constant when . In [11 ###reference_b11###], the conclusion that the time complexity depends only on is arguable since the smaller the , the faster the algorithms are.\nThe computational complexity of recursive algorithms by Shakibaei-Paramesran and Prata-Rusch is much higher than the exponential complexity since the curve of time consumption is over the line specified by our recursive algorithm with exponential complexity. 
For our iterative algorithm with square complexity , the running time is within 1 micro-second, which implies that it is suitable for real-time applications.\nThe data for FIGURE 4 ###reference_### can be downloaded from the GitHub website:\n https://github.com/GrAbsRD/Zernike/tree/RadialPolynomialRunTime.\nIt should be pointed out that the configuration of our testing platform is as follows:\nDebian GNU/Linux 11 (bullseye) OS; Memory, 64GB RAM; Processor, AMD\u00ae Ryzen 7 58008-core processor16; Compiler, gcc-10.2.1 20210110 (Debian 10.2.1-6). For other computational platform, the running time may be different but the trends of the curves should be similar and the straight line for our recursive Algorithm 3.2 ###reference_### will still remain a straight line in the semilog diagram which illustrates the exponential complexity clearly.\nFurthermore, the time consumption of our iterative Algorithm 4.2 ###reference_### will stay at the bottom since its computational complexity is the lowest."
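[Note: a minimal C timing harness of the kind such measurements typically use, assuming the CalcRadiPolyBBTIA sketch above; the indices, radius, and POSIX clock_gettime call are illustrative choices, and the 10 repetitions mirror the averaging protocol described in the text.]

```c
#include <stdio.h>
#include <time.h>

double CalcRadiPolyBBTIA(unsigned n, unsigned m, double r);  /* sketched above */

int main(void) {
    const unsigned n = 100, m = 0;   /* illustrative double indices */
    const double r = 0.75;           /* illustrative radius         */
    const int reps = 10;
    volatile double sink = 0.0;      /* keep calls from being optimized away */
    struct timespec t0, t1;

    clock_gettime(CLOCK_MONOTONIC, &t0);
    for (int k = 0; k < reps; ++k)
        sink += CalcRadiPolyBBTIA(n, m, r);
    clock_gettime(CLOCK_MONOTONIC, &t1);

    double total_us = (t1.tv_sec - t0.tv_sec) * 1e6
                    + (t1.tv_nsec - t0.tv_nsec) / 1e3;
    printf("R_%u^%u(%g): %.3f us per call (average of %d runs)\n",
           n, m, r, total_us / reps, reps);
    (void)sink;
    return 0;
}
```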
},
{
"section_id": "6",
"parent_section_id": null,
"section_name": "Conclusion",
"text": "The numerical computation of ZRP is a challenging problem due to the stability and the computational complexity. Our formulae\n(23 ###reference_###) for computing owns the following advantages:\nfirstly it is capable of computing the value of ZRP with high precision by avoiding computing the high order power functions where is a large positive integer;\nsecondly it has a simple algebraic structure for understanding, remembering and applications;\nthirdly it is symmetric which leads to a balanced binary tree for recursion and convenient theoretic analysis of computational complexity;\nfourthly the conversion of recursive process to the iterative version is easy and the quadratic complexity refreshes the state-of-the-art of the computational complexity for computing ; and\nfinally it leads to a stable computation process since there is no singularity in the expression.\nThe BBTRA proposed is the fastest recursive algorithm for computing the ZRP when compared with the other available recursive algorithms. The BBTIA is the first iterative algorithm for computing the ZRP which has the quadratic time complexity and it is suitable for real-time applications. The high precision can be achieved automatically due to the recursive and iterative property for computing polynomials. In the sense of STEM education, the connection of the BBT and ZRP exhibits the beauty and applications of discrete mathematical structure behind the engineering problem, which is worthy of introducing to the college students, computer programmers and optics engineers."
}
],
"appendix": [
{
"section_id": "Appendix 1",
"parent_section_id": null,
"section_name": "Appendix A Notations for Integers and Computational Complexity",
"text": "For an integer , it is even if and only if , i.e., divides or equivalently ; otherwise, it is odd if and only if or equivalently .\nThe set of non-negative integers are denoted by\n.\nFor any real number , the maximal lower bound such that is called the floor of and it is denoted by\nSimilarly, the minimal upper bound such that is called the ceiling of and it is denoted by\nLet\n and\n\nbe the counting or times of multiplication and addition in some operation expression expr. The time complexity vector of computation (TCVC) for expr is defined by\nNote that we just list two components of here since the subtraction and division can be treated as addition and multiplication respectively for real numbers.\nThe time flops (TF) flops [16 ###reference_b16###], for computing expr is\nin which and are the time flops of multiplication and addition respectively.\nSimilarly, we use to denote the computation time for expr. We also use and to represent the computation time for the multiplications and additions involved in expr.\nGiven the time units and for multiplication and addition respectively, then\nwill be the vector of time units. The time for computing expr can be represented by\nif only multiplication and addition are essential for the total time consumed.\nFor the purpose of analyzing time complexity theoretically instead of estimating practical running time of programs, we can regard the units and as , thus\nthe time consumption is equal to the flops involved.\nLet denote the space complexity of computing expr,\nwhich means the memories required. For allocating memories for a sequence with positive length , the space complexity will be . If the memory consumption for the single element of is , then the total memories for the sequence of v will be . Just like the analysis of time complexity, the memory unit can be regarded as . Consequently, the key problem of estimating space complexity is to estimate the\ncounting of memories instead of concrete memory units.\nFor the algorithm named with Alg, its TCVC is\nand the time for computation is\nSimilarly, the space complexity of the algorithm Alg is\nIf there are some parameters for Alg, then we will take one of the following notations\nto represent the computational complexity according to practical requirements and interests.\nIn algorithm analysis, we take the big-O notation [17 ###reference_b17###, 18 ###reference_b18###]\nto represent time or space complexity of interest with an upper bound where both and are constant. In this paper we will use the following fact\nin the analysis of time complexity."
}
],
"tables": {
"1": {
"table_html": "<figure class=\"ltx_table\" id=\"S3.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Reduction of for the up-down scripts of in recursive relations</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S3.T1.45\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T1.8.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.8.4.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Eq.</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T1.5.1.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Expr. of \n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.6.2.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.7.3.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T1.8.4.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.8.4.6\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Reduction</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.8.4.7\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Balanced Reduction?</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.14.10\">\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T1.14.10.7\" rowspan=\"2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"><span class=\"ltx_text\" id=\"S3.T1.14.10.7.1\">(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2212.02495v4#S1.E12\" title=\"In 1 Introduction \u2023 Balanced Binary Tree Schemes for Computing Zernike Radial Polynomials\"><span class=\"ltx_text ltx_ref_tag\">12</span></a>)</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_tt\" id=\"S3.T1.9.5.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T1.10.6.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T1.11.7.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_tt\" id=\"S3.T1.12.8.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T1.14.10.6\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">\n ()</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T1.14.10.8\" rowspan=\"2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"><span class=\"ltx_text\" id=\"S3.T1.14.10.8.1\">No</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.19.15\">\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T1.15.11.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T1.16.12.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T1.17.13.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T1.18.14.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T1.19.15.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.25.21\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.25.21.7\" rowspan=\"3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"><span class=\"ltx_text\" id=\"S3.T1.25.21.7.1\">(<a class=\"ltx_ref\" 
href=\"https://arxiv.org/html/2212.02495v4#S1.E13\" title=\"In 1 Introduction \u2023 Balanced Binary Tree Schemes for Computing Zernike Radial Polynomials\"><span class=\"ltx_text ltx_ref_tag\">13</span></a>)</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T1.20.16.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.21.17.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.22.18.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T1.23.19.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.25.21.6\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">\n ()</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.25.21.8\" rowspan=\"3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"><span class=\"ltx_text\" id=\"S3.T1.25.21.8.1\">No</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.30.26\">\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T1.26.22.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T1.27.23.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T1.28.24.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T1.29.25.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T1.30.26.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.35.31\">\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T1.31.27.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T1.32.28.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T1.33.29.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S3.T1.34.30.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T1.35.31.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.40.36\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S3.T1.40.36.6\" rowspan=\"2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"><span class=\"ltx_text\" id=\"S3.T1.40.36.6.1\">(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2212.02495v4#S3.E23\" title=\"In 3.1 Computing the Radial Polynomials Recursively \u2023 3 Recursive Scheme for Radial Polynomials \u2023 Balanced Binary Tree Schemes for Computing Zernike Radial Polynomials\"><span class=\"ltx_text ltx_ref_tag\">23</span></a>)</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T1.36.32.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.37.33.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.38.34.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T1.39.35.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.40.36.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td 
ltx_align_center ltx_border_b ltx_border_t\" id=\"S3.T1.40.36.7\" rowspan=\"2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"><span class=\"ltx_text\" id=\"S3.T1.40.36.7.1\">Yes</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.45.41\">\n<td class=\"ltx_td ltx_align_right ltx_border_b\" id=\"S3.T1.41.37.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S3.T1.42.38.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S3.T1.43.39.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S3.T1.44.40.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S3.T1.45.41.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n</tr>\n</tbody>\n</table>\n</figure>",
"capture": "Table 1: Reduction of for the up-down scripts of in recursive relations"
},
"2": {
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Counting the time flops in <span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.26.1\">Algorithm</span>\u00a0<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2212.02495v4#S4.SS2\" title=\"4.2 Balanced Binary Tree Iterative Algorithm (BBTIA) \u2023 4 Iterative Scheme for Computing the Zernike Radial Polynomials \u2023 Balanced Binary Tree Schemes for Computing Zernike Radial Polynomials\"><span class=\"ltx_text ltx_ref_tag\">4.2</span></a> for .</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T2.24\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T2.4.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.4.2.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"><span class=\"ltx_text ltx_font_typewriter\" id=\"S4.T2.4.2.3.1\">Line</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.3.1.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.4.2.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.4.2.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Loop Counting</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.4.2.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Remark</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T2.6.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.6.4.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.6.4.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.5.3.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.6.4.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">1</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T2.6.4.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for leaf node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.9.7\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.9.7.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">8</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.7.5.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.8.6.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.9.7.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">1</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.9.7.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for leaf node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.10.8\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.10.8.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">10</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.10.8.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.10.8.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">1</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.10.8.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">1</td>\n<td class=\"ltx_td ltx_align_left\" 
id=\"S4.T2.10.8.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for root node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.14.12\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.14.12.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">13</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.11.9.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.12.10.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.11.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.14.12.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for root node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.16.14\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.16.14.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">16</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.16.14.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.16.14.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.15.13.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.16.14.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for root node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.18.16\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.18.16.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">17</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.18.16.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.18.16.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.17.15.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.18.16.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for root node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.20.18\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.20.18.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">19</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.20.18.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.20.18.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.19.17.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.20.18.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for root node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.22.20\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.22.20.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">20</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.22.20.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.22.20.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.21.19.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.22.20.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for root node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.24.22\">\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.24.22.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">21</td>\n<td class=\"ltx_td ltx_align_center 
ltx_border_b\" id=\"S4.T2.24.22.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.24.22.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.23.21.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S4.T2.24.22.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for root node \n</td>\n</tr>\n</tbody>\n</table>\n</figure>",
"capture": "Table 2: Counting the time flops in Algorithm\u00a04.2 for ."
},
"3": {
"table_html": "<figure class=\"ltx_table\" id=\"S4.T3\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Counting the time flops in <span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.28.1\">Algorithm</span>\u00a0<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2212.02495v4#S4.SS2\" title=\"4.2 Balanced Binary Tree Iterative Algorithm (BBTIA) \u2023 4 Iterative Scheme for Computing the Zernike Radial Polynomials \u2023 Balanced Binary Tree Schemes for Computing Zernike Radial Polynomials\"><span class=\"ltx_text ltx_ref_tag\">4.2</span></a> for .</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T3.26\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T3.4.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T3.4.2.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"><span class=\"ltx_text ltx_font_typewriter\" id=\"S4.T3.4.2.3.1\">Line</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T3.3.1.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T3.4.2.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T3.4.2.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Loop Counting</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_t\" id=\"S4.T3.4.2.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Remark</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T3.5.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.5.3.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.5.3.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.5.3.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.5.3.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">1</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T3.5.3.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for the leaf node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.8.6\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.8.6.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">8</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.6.4.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.7.5.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.8.6.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">1</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T3.8.6.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for the leaf node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.9.7\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.9.7.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">10</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.9.7.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.9.7.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">1</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.9.7.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">1</td>\n<td class=\"ltx_td ltx_align_left\" 
id=\"S4.T3.9.7.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for the root node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.12.10\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.12.10.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">13</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.10.8.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.11.9.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.12.10.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">1</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T3.12.10.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for the leaf \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.16.14\">\n<td class=\"ltx_td\" id=\"S4.T3.16.14.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.13.11.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.14.12.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.15.13.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T3.16.14.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for the leaf \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.18.16\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.18.16.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">16</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.18.16.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.18.16.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.17.15.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T3.18.16.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for the root node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.20.18\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.20.18.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">17</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.20.18.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.20.18.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.19.17.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T3.20.18.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for the root node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.22.20\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.22.20.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">19</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.22.20.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.22.20.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.21.19.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T3.22.20.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for the root node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.24.22\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.24.22.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">20</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.24.22.4\" 
style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.24.22.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.23.21.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T3.24.22.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for the root node \n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.26.24\">\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T3.26.24.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">21</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T3.26.24.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T3.26.24.5\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T3.25.23.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S4.T3.26.24.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">for the root node \n</td>\n</tr>\n</tbody>\n</table>\n</figure>",
"capture": "Table 3: Counting the time flops in Algorithm\u00a04.2 for ."
},
"4": {
"table_html": "<figure class=\"ltx_table\" id=\"S4.T4\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 4: </span>Computational Complexity of Computing </figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T4.24\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T4.3.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_t\" id=\"S4.T4.3.1.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Type of Algorithm</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.3.1.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Double Indices \n</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.3.1.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Space Complexity</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.3.1.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Time Complexity</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T4.6.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T4.6.4.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Recursive algorithm</th>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.4.2.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.5.3.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.6.4.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.9.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T4.9.7.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T4.9.7.4.1\">CalcRadiPolyBBTRA</span></th>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.7.5.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.8.6.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.9.7.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.12.10\">\n<th class=\"ltx_td ltx_th ltx_th_row\" id=\"S4.T4.12.10.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></th>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.10.8.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.11.9.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.12.10.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.15.13\">\n<th class=\"ltx_td ltx_th ltx_th_row\" id=\"S4.T4.15.13.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></th>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.13.11.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.14.12.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.15.13.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.18.16\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T4.18.16.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Iterative algorithm</th>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" 
id=\"S4.T4.16.14.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.17.15.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T4.18.16.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.21.19\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T4.21.19.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T4.21.19.4.1\">CalcRadiPolyBBTIA</span></th>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.19.17.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.20.18.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T4.21.19.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.24.22\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_b\" id=\"S4.T4.24.22.4\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></th>\n<td class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S4.T4.22.20.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S4.T4.23.21.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S4.T4.24.22.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"></td>\n</tr>\n</tbody>\n</table>\n</figure>",
"capture": "Table 4: Computational Complexity of Computing "
}
},
"image_paths": {
"1": {
"figure_path": "2212.02495v4_figure_1.png",
"caption": "Figure 1: Principle of Recursive scheme for computing Rnm\u2061(\u03c1)superscriptsubscriptR\ud835\udc5b\ud835\udc5a\ud835\udf0c\\operatorname{R}_{n}^{m}(\\rho)roman_R start_POSTSUBSCRIPT italic_n end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_m end_POSTSUPERSCRIPT ( italic_\u03c1 ) with BBT when n\u2260m\u22650\ud835\udc5b\ud835\udc5a0n\\neq m\\geq 0italic_n \u2260 italic_m \u2265 0",
"url": "http://arxiv.org/html/2212.02495v4/extracted/5799013/CalcZernikeRnmRecursive.drawio.png"
},
"2": {
"figure_path": "2212.02495v4_figure_2.png",
"caption": "Figure 2: Principle of Iterative Scheme for Computing Rnm\u2061(\u03c1)superscriptsubscriptR\ud835\udc5b\ud835\udc5a\ud835\udf0c\\operatorname{R}_{n}^{m}(\\rho)roman_R start_POSTSUBSCRIPT italic_n end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_m end_POSTSUPERSCRIPT ( italic_\u03c1 ) when n\u2260|m|\ud835\udc5b\ud835\udc5an\\neq\\left|m\\right|italic_n \u2260 | italic_m |",
"url": "http://arxiv.org/html/2212.02495v4/extracted/5799013/CalcZernikeRnmIterPrinciple.drawio.png"
},
"3": {
"figure_path": "2212.02495v4_figure_3.png",
"caption": "Figure 3: Illustration of Iterative Algorithm for Computing Rnm\u2061(\u03c1)superscriptsubscriptR\ud835\udc5b\ud835\udc5a\ud835\udf0c\\operatorname{R}_{n}^{m}(\\rho)roman_R start_POSTSUBSCRIPT italic_n end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_m end_POSTSUPERSCRIPT ( italic_\u03c1 )",
"url": "http://arxiv.org/html/2212.02495v4/extracted/5799013/CalcZernikeRnmIterAlgorithm.drawio.png"
},
"4(a)": {
"figure_path": "2212.02495v4_figure_4(a).png",
"caption": "(a) Running time for R28m\u2061(\u03c1)superscriptsubscriptR28\ud835\udc5a\ud835\udf0c\\operatorname{R}_{28}^{m}(\\rho)roman_R start_POSTSUBSCRIPT 28 end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_m end_POSTSUPERSCRIPT ( italic_\u03c1 )\nFigure 4: Diagrams of running time of computing Rnm\u2061(\u03c1)superscriptsubscriptR\ud835\udc5b\ud835\udc5a\ud835\udf0c\\operatorname{R}_{n}^{m}(\\rho)roman_R start_POSTSUBSCRIPT italic_n end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_m end_POSTSUPERSCRIPT ( italic_\u03c1 ) the difference of n\ud835\udc5bnitalic_n and m\ud835\udc5amitalic_m for n\u2208{28,29}\ud835\udc5b2829n\\in\\left\\{28,29\\right\\}italic_n \u2208 { 28 , 29 } and m\u22650\ud835\udc5a0m\\geq 0italic_m \u2265 0.",
"url": "http://arxiv.org/html/2212.02495v4/extracted/5799013/RunTimeData-R28withRGB.png"
},
"4(b)": {
"figure_path": "2212.02495v4_figure_4(b).png",
"caption": "(b) Running time for R29m\u2061(\u03c1)superscriptsubscriptR29\ud835\udc5a\ud835\udf0c\\operatorname{R}_{29}^{m}(\\rho)roman_R start_POSTSUBSCRIPT 29 end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_m end_POSTSUPERSCRIPT ( italic_\u03c1 )\nFigure 4: Diagrams of running time of computing Rnm\u2061(\u03c1)superscriptsubscriptR\ud835\udc5b\ud835\udc5a\ud835\udf0c\\operatorname{R}_{n}^{m}(\\rho)roman_R start_POSTSUBSCRIPT italic_n end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_m end_POSTSUPERSCRIPT ( italic_\u03c1 ) the difference of n\ud835\udc5bnitalic_n and m\ud835\udc5amitalic_m for n\u2208{28,29}\ud835\udc5b2829n\\in\\left\\{28,29\\right\\}italic_n \u2208 { 28 , 29 } and m\u22650\ud835\udc5a0m\\geq 0italic_m \u2265 0.",
"url": "http://arxiv.org/html/2212.02495v4/extracted/5799013/RunTimeData-R29withRGB.png"
}
},
"validation": true,
"references": [
{
"1": {
"title": "Zernike polynomials and atmospheric turbulence.",
"author": "Robert J. Noll.",
"venue": "Journal of the Optical Society of America, 66(3):207\u2013211, Mar 1976.",
"url": null
}
},
{
"2": {
"title": "An automatic method for generating symbolic expressions of Zernike circular polynomials.",
"author": "Hong-Yan Zhang, Yu Zhou, and Fu-Yun Li.",
"venue": "IEEE Access, 11(6):56481\u201356493, June 2023.",
"url": null
}
},
{
"3": {
"title": "Zernike annular polynomials for imaging systems with annular pupils.",
"author": "Virendra N. Mahajan.",
"venue": "Journal of The Optical Society of America, 71(1):75\u201385, Jan 1981.",
"url": null
}
},
{
"4": {
"title": "Orthonormal aberration polynomials for optical systems with circular and annular sector pupils.",
"author": "Jos\u00e9 Antonio D\u00edaz and Virendra N. Mahajan.",
"venue": "Applied Optics, 52(6):1136\u20131147, Feb 2013.",
"url": null
}
},
{
"5": {
"title": "Optical Shop Testing.",
"author": "Daniel Malacara, editor.",
"venue": "John Wiley & Sons Inc, New York, 3rd edition, 2007.",
"url": null
}
},
{
"6": {
"title": "Zernike basis to cartesian transformations.",
"author": "Richard J. Mathar.",
"venue": "online, Sept. 13 2008.",
"url": null
}
},
{
"7": {
"title": "Zernike Coefficients.",
"author": "Jens B\u00fchren.",
"venue": "Springer Berlin Heidelberg, Berlin, Heidelberg, 2018.",
"url": null
}
},
{
"8": {
"title": "Zernike Aberrations.",
"author": "Christopher George Berger.",
"venue": "Optics: The Website, https://opticsthewebsite.com/Zernike, Accessed on 18/August/2022.",
"url": null
}
},
{
"9": {
"title": "Principles of Optics.",
"author": "Max Born and Emil Wolf.",
"venue": "Cambridge University Press, London, 7th edition, 1999.",
"url": null
}
},
{
"10": {
"title": "Zernike circle polynomials and infinite integrals involving the product of Bessel functions.",
"author": "A. J. E. M. Janssen.",
"venue": "online, Jul 5 2010.",
"url": null
}
},
{
"11": {
"title": "Recursive formula to compute Zernike radial polynomials.",
"author": "Barmak Honarvar Shakibaei and Raveendran Paramesran.",
"venue": "Optics Letters, 38(14):2487\u20132489, Jul 2013.",
"url": null
}
},
{
"12": {
"title": "ZEMAX: Optical Design Program User\u2019s Guide, 10th edition, 2013.",
"author": "Focus Software Inc.",
"venue": "www.zemax.com.",
"url": null
}
},
{
"13": {
"title": "A comparative analysis of algorithms for fast computation of Zernike moments.",
"author": "C. W. Chong, P. Raveendran, and R. Mukundan.",
"venue": "Pattern Recognition, 36(3):731\u2013742, 2003.",
"url": null
}
},
{
"14": {
"title": "On the Mathematical Properties of the Zernike Polynomials.",
"author": "E. C. Kintner.",
"venue": "Optics Acta, 23(8):679\u2013680, 1976.",
"url": null
}
},
{
"15": {
"title": "Algorithm for Computation of Zernike Polynomials Expansion Coefficients.",
"author": "A. Prata and W. V. T. Rusch.",
"venue": "Applied Optics, 28:749\u2013754, 1989.",
"url": null
}
},
{
"16": {
"title": "Matrix Computations.",
"author": "Gene H. Golub and Charles F. Van Loan.",
"venue": "Johns Hopkins University Press, Baltimore, 4th edition, Jan. 2013.",
"url": null
}
},
{
"17": {
"title": "The Art of Computer Programming, volume 1: Fundamental Algorithms.",
"author": "Donald E. Knuth.",
"venue": "Addison-Wesley, New York, 3rd edition, 1997.",
"url": null
}
},
{
"18": {
"title": "Introduction to Algorithms.",
"author": "Thomas H. Cormen, Charles E. Leiserson, Ronald L. Rivest, and Clifford Stein.",
"venue": "The MIT Press, Cambridge, 4th edition, 2022.",
"url": null
}
}
],
"url": "http://arxiv.org/html/2212.02495v4"
}
20240819/2303.04238v5.json
ADDED
The diff for this file is too large to render.
See raw diff

20240819/2303.12341v2.json
ADDED
The diff for this file is too large to render.
See raw diff

20240819/2305.03515v7.json
ADDED
The diff for this file is too large to render.
See raw diff

20240819/2306.12341v2.json
ADDED
The diff for this file is too large to render.
See raw diff

20240819/2307.10230v4.json
ADDED
The diff for this file is too large to render.
See raw diff
20240819/2307.11091v2.json
ADDED
@@ -0,0 +1,60 @@
{
"title": "Data-driven criteria for quantum correlations",
"abstract": "We build a machine learning model to detect correlations\nin a three-qubit system using a neural network trained in an unsupervised manner on randomly generated states.\nThe network is forced to recognize separable states, and correlated states are detected as anomalies.\nQuite surprisingly, we find that the proposed detector performs much better at distinguishing a weaker form of quantum correlations,\nnamely, the quantum discord, than entanglement. In fact, it has a tendency to grossly overestimate the set of entangled states\neven at the optimal threshold for entanglement detection, while it underestimates the set of discordant states to a much lesser\nextent.\nIn order to illustrate the nature of states classified as quantum-correlated,\nwe construct a diagram containing various types of states \u2013 entangled, as well as separable, both discordant and non-discordant.\nWe find that the near-zero value of the\nrecognition loss reproduces the shape of the non-discordant separable\nstates with high accuracy, especially considering the non-trivial shape of this set on the diagram.\nThe network architecture is designed carefully: it preserves\nseparability, and its\noutput is equivariant with respect to qubit permutations.\nWe show that the choice of architecture is important to get\nthe highest detection accuracy, much better than for a baseline model that just utilizes a partial trace operation.",
"sections": [],
"appendix": [],
"tables": {
"1": {
"table_html": "<figure class=\"ltx_table\" id=\"S0.T1\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S0.T1.11\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S0.T1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S0.T1.1.1.2\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S0.T1.1.1.2.1\">\n<tr class=\"ltx_tr\" id=\"S0.T1.1.1.2.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S0.T1.1.1.2.1.1.1\">test</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S0.T1.1.1.2.1.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S0.T1.1.1.2.1.2.1\">set</td>\n</tr>\n</table>\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" colspan=\"4\" id=\"S0.T1.1.1.1\">\n for different subsets</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S0.T1.11.12.1\">\n<th class=\"ltx_td ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_r\" id=\"S0.T1.11.12.1.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S0.T1.11.12.1.2\">separable</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S0.T1.11.12.1.3\">non-discordant</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S0.T1.11.12.1.4\">discordant</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S0.T1.11.12.1.5\">entangled</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S0.T1.6.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S0.T1.2.2.1\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S0.T1.3.3.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S0.T1.4.4.3\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S0.T1.5.5.4\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S0.T1.6.6.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S0.T1.11.11\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S0.T1.7.7.1\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S0.T1.8.8.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S0.T1.9.9.3\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S0.T1.10.10.4\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S0.T1.11.11.5\"></td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Average separator loss for different subsets in pure and mixed test sets quantifying similarity between different types of states and separable ones.</figcaption>\n</figure>",
"capture": "Table 1: Average separator loss for different subsets in pure and mixed test sets quantifying similarity between different types of states and separable ones."
},
"2": {
"table_html": "<figure class=\"ltx_table\" id=\"S0.T2\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S0.T2.5\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S0.T2.1.1\">\n<th class=\"ltx_td ltx_nopad ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S0.T2.1.1.1\"><svg height=\"19.22\" overflow=\"visible\" version=\"1.1\" width=\"135.92\"><g transform=\"translate(0,19.22) scale(1,-1)\"><path d=\"M 0,19.22 135.92,0\" stroke=\"#000000\" stroke-width=\"0.4\"></path><g class=\"ltx_svg_fog\" transform=\"translate(0,0)\"><g transform=\"translate(0,9.61) scale(1, -1)\"><foreignobject height=\"9.61\" overflow=\"visible\" width=\"39.09\">\n<span class=\"ltx_inline-block\" id=\"S0.T2.1.1.1.pic1.1.1\">\n<span class=\"ltx_inline-block ltx_align_left\" id=\"S0.T2.1.1.1.pic1.1.1.1\">\n<span class=\"ltx_p\" id=\"S0.T2.1.1.1.pic1.1.1.1.1\">Labels</span>\n</span>\n</span></foreignobject></g></g><g class=\"ltx_svg_fog\" transform=\"translate(67.96,9.61)\"><g transform=\"translate(0,9.61) scale(1, -1)\"><foreignobject height=\"9.61\" overflow=\"visible\" width=\"67.96\">\n<span class=\"ltx_inline-block\" id=\"S0.T2.1.1.1.pic1.2.1\">\n<span class=\"ltx_inline-block ltx_align_right\" id=\"S0.T2.1.1.1.pic1.2.1.1\">\n<span class=\"ltx_p\" id=\"S0.T2.1.1.1.pic1.2.1.1.1\">Predictions</span>\n</span>\n</span></foreignobject></g></g></g></svg></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S0.T2.1.1.2\"><span class=\"ltx_text ltx_inline-block\" id=\"S0.T2.1.1.2.1\" style=\"width:70.0pt;\">separable</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S0.T2.1.1.3\"><span class=\"ltx_text ltx_inline-block\" id=\"S0.T2.1.1.3.1\" style=\"width:70.0pt;\">entangled</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S0.T2.2.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_tt\" id=\"S0.T2.2.2.1\">separable \n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S0.T2.2.2.2\">28340</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S0.T2.2.2.3\">15257</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S0.T2.3.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S0.T2.3.3.1\">entangled \n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S0.T2.3.3.2\">305</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S0.T2.3.3.3\">21098</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S0.T2.4.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_tt\" id=\"S0.T2.4.4.1\">separable \n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S0.T2.4.4.2\">30696</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S0.T2.4.4.3\">12901</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S0.T2.5.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"S0.T2.5.5.1\">entangled \n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S0.T2.5.5.2\">942</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S0.T2.5.5.3\">20461</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Confusion matrix for 
separable/entangled states classification at \nand .</figcaption>\n</figure>",
"capture": "Table 2: Confusion matrix for separable/entangled states classification at and ."
},
"3": {
"table_html": "<figure class=\"ltx_table\" id=\"S0.T3\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S0.T3.5\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S0.T3.1.1\">\n<th class=\"ltx_td ltx_nopad ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S0.T3.1.1.1\"><svg height=\"19.22\" overflow=\"visible\" version=\"1.1\" width=\"135.92\"><g transform=\"translate(0,19.22) scale(1,-1)\"><path d=\"M 0,19.22 135.92,0\" stroke=\"#000000\" stroke-width=\"0.4\"></path><g class=\"ltx_svg_fog\" transform=\"translate(0,0)\"><g transform=\"translate(0,9.61) scale(1, -1)\"><foreignobject height=\"9.61\" overflow=\"visible\" width=\"39.09\">\n<span class=\"ltx_inline-block\" id=\"S0.T3.1.1.1.pic1.1.1\">\n<span class=\"ltx_inline-block ltx_align_left\" id=\"S0.T3.1.1.1.pic1.1.1.1\">\n<span class=\"ltx_p\" id=\"S0.T3.1.1.1.pic1.1.1.1.1\">Labels</span>\n</span>\n</span></foreignobject></g></g><g class=\"ltx_svg_fog\" transform=\"translate(67.96,9.61)\"><g transform=\"translate(0,9.61) scale(1, -1)\"><foreignobject height=\"9.61\" overflow=\"visible\" width=\"67.96\">\n<span class=\"ltx_inline-block\" id=\"S0.T3.1.1.1.pic1.2.1\">\n<span class=\"ltx_inline-block ltx_align_right\" id=\"S0.T3.1.1.1.pic1.2.1.1\">\n<span class=\"ltx_p\" id=\"S0.T3.1.1.1.pic1.2.1.1.1\">Predictions</span>\n</span>\n</span></foreignobject></g></g></g></svg></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S0.T3.1.1.2\"><span class=\"ltx_text ltx_inline-block\" id=\"S0.T3.1.1.2.1\" style=\"width:70.0pt;\">non-discordant</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S0.T3.1.1.3\"><span class=\"ltx_text ltx_inline-block\" id=\"S0.T3.1.1.3.1\" style=\"width:70.0pt;\">discordant</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S0.T3.2.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_tt\" id=\"S0.T3.2.2.1\">non-discordant \n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S0.T3.2.2.2\">25114</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S0.T3.2.2.3\">1192</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S0.T3.3.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S0.T3.3.3.1\">discordant \n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S0.T3.3.3.2\">3531</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S0.T3.3.3.3\">35163</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S0.T3.4.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_tt\" id=\"S0.T3.4.4.1\">non-discordant \n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S0.T3.4.4.2\">25822</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S0.T3.4.4.3\">484</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S0.T3.5.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"S0.T3.5.5.1\">discordant \n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S0.T3.5.5.2\">5816</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S0.T3.5.5.3\">32878</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Confusion 
matrix for non-discordant/discordant states classification at \nand .</figcaption>\n</figure>",
"capture": "Table 3: Confusion matrix for non-discordant/discordant states classification at and ."
}
},
"image_paths": {
"1": {
"figure_path": "2307.11091v2_figure_1.png",
"caption": "Figure 1: The 3-qubit neural network separator model. The input density matrix is convolved separately with three convolution layers, each with 4\u00d74444\\times 44 \u00d7 4 kernels that work in parallel.\nThe stride and dilation parameters defining these convolution layers are arranged in such a way that they operate on each qubit subspace separately (details can be found in SM [43]). After the convolution layers, shape-preserving fully-connected layers are applied independently for each qubit matrix.",
"url": "http://arxiv.org/html/2307.11091v2/extracted/5799732/1.png"
},
"2": {
"figure_path": "2307.11091v2_figure_2.png",
"caption": "Figure 2: (a) Separator performance for detecting discordant (green) and entangled (red) states, tested on Smixedsubscript\ud835\udc46mixedS_{\\mathrm{mixed}}italic_S start_POSTSUBSCRIPT roman_mixed end_POSTSUBSCRIPT. The results present: (left) precision vs. recall curves, and (right) balanced accuracy, B\u2062A\ud835\udc35\ud835\udc34BAitalic_B italic_A depending on the threshold \u03c4\ud835\udf0f\\tauitalic_\u03c4 value. For comparison, partial trace-based baseline model performance is also presented by blue and orange curves, respectively. (b) Same as (a) but for separator model with removed FC layers \u2013 cf. Fig. 1. (c) Same as in (a) but for the separator trained on the non-product states only.",
"url": "http://arxiv.org/html/2307.11091v2/extracted/5799732/2.png"
},
"3": {
"figure_path": "2307.11091v2_figure_3.png",
"caption": "Figure 3: Reconstruction loss \u2112\u2112\\mathcal{L}caligraphic_L for the already trained separator model tested on the family of 3-qubit states with their separability/discordance conditions known and parameterized on 2D map (see inset for the map division into the classes).",
"url": "http://arxiv.org/html/2307.11091v2/extracted/5799732/3.png"
},
"4": {
"figure_path": "2307.11091v2_figure_4.png",
"caption": "Figure S1: The separator autoencoder composed of trainable encoder neural network (gray triangle) and analytical decoder that simply calculates the Kronecker product.",
"url": "http://arxiv.org/html/2307.11091v2/x1.png"
},
"5": {
"figure_path": "2307.11091v2_figure_5.png",
"caption": "Figure S2: \nSame as in Fig. 2 but for separator trained only on some part of the original training set: pure separable states (Pure), product states (Prod), non-discordant states (ZD), separable states (Sep) and non-product separable states (NPS). Results for separator without four fully-connected layers (containing only convolutional ones) are marked as \u201dNo FC4\u201d.",
"url": "http://arxiv.org/html/2307.11091v2/extracted/5799732/S2.png"
},
"6": {
"figure_path": "2307.11091v2_figure_6.png",
"caption": "Figure S3: Example weights of 4\u00d74444\\times 44 \u00d7 4 convolutional kernels for the already trained 3-qubit separator model. The kernels reconstruct (a) real and (b) imaginary parts of the first qubit (weights for two other qubits are similar) for the plain separator model, with just convolutional layers. Similarly, (c) and (d) show real and imaginary parts of kernels for separator with additional fully-connected layers.",
"url": "http://arxiv.org/html/2307.11091v2/extracted/5799732/S3.jpg"
},
"7": {
"figure_path": "2307.11091v2_figure_7.png",
"caption": "Figure S4: Reconstruction loss \u2112\u2112\\mathcal{L}caligraphic_L for the partial trace-based model tested on family of 3-qubit states with their separability/discordance conditions known parameterized on 2D map.",
"url": "http://arxiv.org/html/2307.11091v2/extracted/5799732/S4.jpg"
}
},
"validation": true,
"references": [],
"url": "http://arxiv.org/html/2307.11091v2"
}
20240819/2309.05919v2.json
ADDED
The diff for this file is too large to render.
See raw diff

20240819/2402.01975v3.json
ADDED
The diff for this file is too large to render.
See raw diff

20240819/2403.06009v3.json
ADDED
The diff for this file is too large to render.
See raw diff

20240819/2403.15760v2.json
ADDED
The diff for this file is too large to render.
See raw diff

20240819/2403.18198v2.json
ADDED
@@ -0,0 +1,439 @@
{
"title": "Generative Medical Segmentation",
"abstract": "Rapid advancements in medical image segmentation performance have been significantly driven by the development of Convolutional Neural Networks (CNNs) and Vision Transformers (ViTs). These models follow the discriminative pixel-wise classification learning paradigm and often have limited ability to generalize across diverse medical imaging datasets. In this manuscript, we introduce Generative Medical Segmentation (GMS), a novel approach leveraging a generative model to perform image segmentation. Concretely, GMS employs a robust pre-trained vision foundation model to extract latent representations for images and corresponding ground truth masks, followed by a model that learns a mapping function from the image to the mask in the latent space. Once trained, the model generates an estimated segmentation mask using the pre-trained vision foundation model to decode the predicted latent representation back into the image space. The design of GMS leads to fewer trainable parameters in the model which reduces the risk of overfitting and enhances its generalization capability. Our experimental analysis across five public datasets in different medical imaging domains demonstrates GMS outperforms existing discriminative and generative segmentation models. Furthermore, GMS is able to generalize well across datasets from different centers within the same imaging modality. Our experiments suggest GMS offers a scalable and effective solution for medical image segmentation. GMS implementation and trained model weights are available at https://github.com/King-HAW/GMS",
"sections": [
{
"section_id": "1",
"parent_section_id": null,
"section_name": "Introduction",
"text": "Image segmentation plays a critical role in medical image analysis by enabling the automated and precise delineation of anatomical and pathological structures within medical images. This process allows clinicians to obtain detailed visualizations, such as lesions and tumors, which can support computer-aided diagnosis systems and enhance the accuracy of clinical assessments. Additionally, the quantitative assessments derived from segmentation are vital for treatment planning and monitoring of disease progression. By incorporating automated segmentation into clinical practice, the precision and efficacy of therapeutic interventions are improved, leading to better patient outcomes (Al-Dhabyani et al. 2020 ###reference_b1###; Tschandl, Rosendahl, and Kittler 2018 ###reference_b28###; Jha et al. 2021 ###reference_b13###).\nCurrent deep learning models designed for medical image segmentation, such as U-Net (Ronneberger, Fischer, and Brox 2015 ###reference_b24###) and its various adaptations (Ruan et al. 2023 ###reference_b25###; Ibtehaz and Kihara 2023 ###reference_b10###), have significantly advanced the field of medical imaging analysis. These models have been pivotal in enhancing the accuracy and efficiency of segmenting objects from various imaging modalities such as MRI and CT. Early deep learning-based image segmentation models leverage convolution kernels to learn local patch representations from large amounts of labeled data. Despite their successes, models based on Convolutional Neural Networks (CNNs) often have a large number of parameters which can introduce challenges in model training and increase the likelihood of overfitting when training datasets are small. Additionally, the limited receptive field of the convolution kernel makes it difficult for CNN-based models to learn global context information that can provide important guidance during image segmentation. Moreover, CNN-based models struggle with generalizing to unseen domains, leading to potentially substantial performance drops when the test dataset distribution is shifted from the training dataset distribution.\nThe Vision Transformer (ViT) (Dosovitskiy et al. 2021 ###reference_b6###) model has recently been presented as a powerful alternative to CNN-based segmentation models in medical imaging analysis. ViT can capture global semantic information that the convolution kernel is unable to represent. Transformer-based segmentation models, such as UCTransNet (Wang et al. 2022a ###reference_b30###) and Swin-Unet (Cao et al. 2022 ###reference_b2###), leverage the transformer architecture to represent images as sequences of patches, enabling the model to learn relationships across the entire image. Transformer-based models facilitate a more holistic image analysis by integrating both local and global context information. Therefore, these models can accurately segment anatomical structures or pathological changes in medical images, surpassing CNN-based models in certain domains. However, transformer-based models are required to be trained on very large datasets to achieve optimal performance, which can be a major bottleneck given the scarcity of such datasets in the medical field. Additionally, the high computational costs needed for the multi-head attention module pose practical challenges for real-time applications and deployment in environments with limited computational resources. 
Furthermore, due to the large number of parameters in transformer-based models, there is an increased risk of overfitting when training on small datasets with subsequent challenges of poor generalization to out-of-domain datasets under such conditions.\n###figure_1### Generative models, such as Generative Adversarial Networks (GANs) (Goodfellow et al. 2014 ###reference_b7###) and Variational Autoencoders (VAEs) (Kingma and Welling 2013 ###reference_b15###), are often adopted as data augmentation techniques to improve the performance of segmentation models (Huo et al. 2022 ###reference_b9###). However, GANs suffer from mode collapse and may distort outputs when the number of training samples is small (Karras et al. 2020 ###reference_b14###). Additionally, GANs can not guarantee that the distribution of synthetic images is similar to the distribution of real images in the domain. Image-to-image translation models have been used to perform image segmentation in a generative manner, where the image serves as the input and the mask as the prediction. To date, the performance of image-to-image models is well below state-of-the-art segmentation model performance (Li et al. 2021 ###reference_b18###). Recently, MedSegDiff-V2 (Wu et al. 2024 ###reference_b32###) utilized a diffusion model for medical image segmentation, where a condition model is proposed to encode images into the feature space for mask generation. However, diffusion-based approaches require repetitive denoising steps which lead to longer inference times. GSS (Chen et al. 2023 ###reference_b4###) is a generative semantic segmentation framework designed for semantic image segmentation, where Vector Quantized Variational Autoencoder (VQ-VAE) (Van Den Oord, Vinyals et al. 2017 ###reference_b29###) was employed to project the image and mask into a latent space, and an additional image encoder was designed and trained to match the latent distributions between the mask and image. However, GSS has high computational costs as the additional image encoder is complex, requiring a large number of trainable parameters, to translate the input image into a latent prior distribution.\nIn this paper, we propose Generative Medical Segmentation (GMS) to perform image segmentation in a generative manner. GMS leverages a pre-trained image encoder to obtain latent representations containing the semantic information for input images and masks, and a latent mapping model is designed to learn a transformation function from the image latent representation to the mask latent representation. The final segmentation mask in the image space is obtained by decoding the transformed mask latent representation using a pre-trained image decoder paired with the pre-trained image encoder. In this approach, only the latent mapping model parameters are learned from the training dataset. The pre-trained image encoder and decoder are obtained from a vision foundation model trained on a large, general dataset. Therefore, the latent representations are more general to unseen data compared to models trained only on images for the desired specific task. We demonstrate GMS achieves the best performance on five public medical image segmentation datasets across different domains. Furthermore, we perform an external validation experiment to demonstrate that the inherent domain generalization ability of GMS is better than other domain generalization methods."
},
{
"section_id": "2",
"parent_section_id": null,
"section_name": "Related Works",
"text": ""
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Medical Image Segmentation",
|
| 21 |
+
"text": "Medical image segmentation has experienced rapid advancements in the last decade due to the development of deep-learning techniques. The encoder-decoder architecture with skip connections enables accurate image segmentation by combining low-level and high-level features to perform pixel-wise prediction, making U-Net (Ronneberger, Fischer, and Brox 2015 ###reference_b24###) a benchmark method across various medical image segmentation tasks. Subsequent model enhancements such as MultiResUNet (Ibtehaz and Rahman 2020 ###reference_b11###) and ACC-UNet (Ibtehaz and Kihara 2023 ###reference_b10###) have been implemented using the basic architecture by integrating the residual block or redesigning the hierarchical feature fusion pipeline to gain improved segmentation performance. nnU-Net (Isensee et al. 2021 ###reference_b12###) established a guideline for tailoring the receptive field size of convolution kernels and network depth to specific tasks and also incorporated extensive data augmentation into model training to improve segmentation performance for specific medical imaging datasets.\nThe Vision Transformer (ViT) introduced the multi-head attention mechanism, which enables capturing long-range feature dependencies across patches in the image, leading to stronger feature representations for image segmentation compared to CNN-based models. This ability to model relationships between distant pixels has proven highly beneficial for medical image segmentation, where understanding the broader imaging context is often crucial to performing the task. ViT-based segmentation models (Cao et al. 2022 ###reference_b2###; Wang et al. 2022b ###reference_b31###, a ###reference_b30###) have competitive results compared against traditional CNN-based models."
},
{
"section_id": "2.2",
"parent_section_id": "2",
"section_name": "Generative & Foundation Models",
"text": "Generative models are commonly designed for image synthesis and image-to-image translation tasks. For image synthesis, GANs (Goodfellow et al. 2014 ###reference_b7###) and VAEs (Kingma and Welling 2013 ###reference_b15###) are often leveraged to generate more data for downstream model training (Huo et al. 2022 ###reference_b9###; Chaitanya et al. 2021 ###reference_b3###), especially in the context of medical image segmentation, as the cost of obtaining large, annotated medical imaging datasets is high. Recently, studies have explored the use of diffusion models to create more training instances and alleviate the data scarcity problem (Ye et al. 2023 ###reference_b34###). However, the iterative denoising process in diffusion models results in a longer inference time compared to GAN or VAE-based approaches. For image-to-image translation, models developed on CNNs (Kong et al. 2021 ###reference_b17###) or ViT (Liu et al. 2023 ###reference_b21###) show satisfied results on the MRI missing modality completion task. Currently, few models are designed for performing the image segmentation task directly in a generative manner. GSS (Chen et al. 2023 ###reference_b4###) is the exception, this model employs VQ-VAE (Van Den Oord, Vinyals et al. 2017 ###reference_b29###) to discretize the image and mask into a finite set of latent codes, which are then reconstructed back into the image space. An independent image encoder is trained to match the image latent codes to the mask latent codes.\nFoundation models, such as Stable Diffusion (Rombach et al. 2022 ###reference_b23###) and Segment Anything (SAM) (Kirillov et al. 2023 ###reference_b16###), are trained on large-scale datasets and are designed to generalize across a wide range of tasks. These models are able to serve as a versatile starting point for numerous tasks. Stable Diffusion utilizes a VAE model to first encode the image into the latent space and leverages a UNet to iteratively denoise and reconstruct the latent embeddings, guiding the generation process towards a high-quality output. SAM is designed for image segmentation with a prompt design that allows user interactions to adapt the model to various segmentation tasks with no or minimal fine-tuning. Together, these models exemplify the power and flexibility of foundation models in addressing diverse and complex tasks such as image segmentation."
},
{
"section_id": "3",
"parent_section_id": null,
"section_name": "Methodology",
"text": ""
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.1",
|
| 37 |
+
"parent_section_id": "3",
|
| 38 |
+
"section_name": "Architecture Overview",
|
| 39 |
+
"text": "The Generative Medical Segmentation (GMS) model architecture is shown in Figure 1 ###reference_###. Given a 2D image and a corresponding segmentation mask , the pre-trained encoder is used to obtain latent representations and of and , respectively. The latent mapping model (LMM) is trained to use to predict an estimated latent representation of . is then decoded by the pre-trained decoder to obtain the predicted segmentation result in the original image space. Note the weights of and are pre-trained and frozen during both model training and inference, which enables updating only the LMM parameters during training. This approach reduces the number of trainable parameters in the model to be much smaller compared to other state-of-the-art deep learning segmentation models."
},
{
"section_id": "3.2",
"parent_section_id": "3",
"section_name": "Image Tokenizer",
"text": "The pre-trained encoder and decoder are treated as an image tokenizer as they map an input from the image space to the latent space () or the latent to image space (). The choice of an appropriate and paired and to obtain a representative latent space for both input images and masks is critical for GMS performance. In this work, we use the weights of Stable Diffusion (SD) VAE (Rombach et al. 2022 ###reference_b23###) for and . Since SD-VAE was trained on a large natural image dataset (Schuhmann et al. 2022 ###reference_b26###), it has a rich and diverse latent information representation, leading to a strong zero-shot generalization ability even for medical images. SD-VAE can achieve near-perfect image reconstruction, which enables the feasibility of training GMS (Rombach et al. 2022 ###reference_b23###).\nSD-VAE is comprised of three down-sampling blocks in and three up-sampling blocks in . The latent representation is a 3D tensor containing spatial information ( if ). Such design enables to have a rich feature representation, improved reconstruction quality and enhanced the generalization of the latent representation."
},
{
"section_id": "3.3",
"parent_section_id": "3",
"section_name": "Latent Mapping Model (LMM)",
"text": "The latent mapping model (LMM) is the key component in GMS to map from to . Instead of using the transformer block that recruits tons of parameters for multi-head attention, we build the LMM with 2D convolutions which make it lightweight. Besides, we do not include any down-sampling layers in LMM to avoid spatial information loss. Note excluding down-sampling layers is not practical in the original UNet model because the receptive fields of the convolution operations are greatly limited if no down-sampling layers are used in the model. Skip connections between convolutional layers are added to prevent vanishing gradients and the loss of semantic-relevant features.\nThe model structure is shown in the lower middle of Figure 1 ###reference_### (orange box). Given , which is acquired from the pre-trained encoder , it first goes through two convolution blocks where each block consists of a 2D convolutional layer (Conv), a PReLU activation function, and group normalization (GN) layer to obtain the feature vector .\nNext, a self-attention mechanism layer is added to better capture global semantic relationships and facilitate feature interaction within . Specifically, we use three independent convolution operations to generate query , key , and value , respectively:\nHere is the convolution kernel matrix and is the learnable bias.\nThen the self-attention for the query, key, and value is computed as:\nwhere denotes the feature channel of , and denotes the softmax normalization function. Due to the small spatial size of the , employing the self-attention mechanism allows for the efficient capture of long-range dependencies and interactions within the latent representations."
},
{
"section_id": "3.4",
"parent_section_id": "3",
"section_name": "Loss Functions",
"text": "Two loss functions are used to guide model training, a matching loss in the latent space and a segmentation loss in the image space. is formulated to enforce similarity between and . Specifically, is defined as:\nenforces similarity between the predicted mask and the ground truth mask , even where the latent representation deviates from . is defined as:\nwhere denotes element-wise multiplication. The final compound loss function used for model training is:\n###figure_2###"
},
{
"section_id": "4",
"parent_section_id": null,
"section_name": "Experiments",
"text": ""
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.1",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "Datasets",
|
| 69 |
+
"text": "We evaluated the performance of GMS on five public datasets: BUS (Yap et al. 2017 ###reference_b33###), BUSI (Al-Dhabyani et al. 2020 ###reference_b1###), GlaS (Sirinukunwattana et al. 2017 ###reference_b27###), HAM10000 (Tschandl, Rosendahl, and Kittler 2018 ###reference_b28###) and Kvasir-Instrument (Jha et al. 2021 ###reference_b13###). BUS and BUSI are breast lesion ultrasound datasets that contain 163 and 647 images, respectively. GlaS is a colon histology segmentation challenge dataset divided into 85 images for training and 80 images for testing. HAM10000 is a large dermatoscopic dataset that consists of 10015 images with skin lesion segmentation masks. The Kvasir-Instrument dataset contains 590 endoscopic images with tool segmentation masks. For datasets not already divided, we randomly select 80% of the images for training and the remaining 20% for testing."
},
{
"section_id": "4.2",
"parent_section_id": "4",
"section_name": "Implementation Details",
"text": "Our framework is implemented using PyTorch v1.13, and all model training was performed on an NVIDIA A100 40G GPU. We use AdamW (Loshchilov and Hutter 2019 ###reference_b22###) as the training optimizer. We utilize the cosine annealing learning rate scheduler to adjust the learning rate in each epoch with the initial learning rate set to . For all experiments, the batch size was set to 8 and the total training epochs were 1000. The input image size is resized to , and on-the-fly data augmentations were performed during training including random flip, random rotation, and color jittering in the HSV domain. We set a threshold of 0.5 to change the predicted gray-scale masks to binary masks. We quantify segmentation performance using the Dice coefficient (DSC), Intersection over Union (IoU), and Hausdorff Distance 95th percentile (HD95)."
},
{
"section_id": "4.3",
"parent_section_id": "4",
"section_name": "Comparison with State-of-the-Art Models",
"text": "We compare GMS with other state-of-the-art methods to evaluate its performance, including CNN-based methods: UNet (Ronneberger, Fischer, and Brox 2015 ###reference_b24###), MultiResUNet (Ibtehaz and Rahman 2020 ###reference_b11###), ACC-UNet (Ibtehaz and Kihara 2023 ###reference_b10###), nnUNet (Isensee et al. 2021 ###reference_b12###) and EGE-UNet (Ruan et al. 2023 ###reference_b25###); Transformer-based methods: SwinUNet (Cao et al. 2022 ###reference_b2###), SME-SwinUNet (Wang et al. 2022b ###reference_b31###) and UCTransNet (Wang et al. 2022a ###reference_b30###); and generative methods: MedSegDiff-V2 (Wu et al. 2024 ###reference_b32###), SDSeg (Lin et al. 2024 ###reference_b20###) and GSS (Chen et al. 2023 ###reference_b4###)). We also compared against two domain generalization models: MixStyle (Zhou et al. 2023 ###reference_b35###) and DSU (Li et al. 2022 ###reference_b19###) to evaluate the inherent domain generalization ability of GMS.\nQuantitative comparisons of all models on the two ultrasound datasets are presented in Table 1 ###reference_###. GMS achieves the highest DSC, IoU, and HD95 compared to other models. We also present the trainable parameter of each model in Table 1 ###reference_###, where only EGE-UNet has fewer trainable parameters than GMS, and most models have between and more parameters. GMS achieves a 2.71% and 1.87% improvement in the DSC metric on the BUS and BUSI datasets, respectively, compared to the second-best model. Additionally, for IoU and HD95 metrics, our approach shows improvement of up to 1.88% and 4.64, respectively, over the second-best model. Transformer-based segmentation models do not show competitive results on these datasets, which indicates that the intrinsic long-range modeling capability of the transformer block may not be suitable for such tasks as gray-scale ultrasound images lack chromatic information that often aids in distinguishing different tissues. The limited texture and low contrast in these images might reduce the effectiveness of the multi-head attention module in transformer-based models. nnUNet, as a powerful auto-configuration segmentation model, beats transformer-based models and even some generative models, and is the second-best model on the BUS dataset. Notably, segmentation performance is not correlated to the models\u2019 number of trainable parameters but is beneficial from the plausible model design and the robust representations provided by the pre-trained vision foundation model. However, models (e.g. EGE-UNet) that contain too few trainable parameters, may lack the capacity to capture complex patterns and relationships in the images, leading to underfitting and poor performance on both datasets. On the contrary, models (e.g. SME-SwinUNet and MedSegDiff-V2) with an excess of parameters can easily overfit the training dataset, memorizing rather than generalizing, which compromises its performance on the test set.\nTable 2 ###reference_### presents quantitative results on the other three datasets, where all images are colored. GMS achieves the best segmentation performance except for the IoU metric on the Kvasir-Instrument dataset. It is worth noting that not all generative segmentation models outperform other discriminative models, which proves the importance of design when applying the generative model framework. MedSegDiff-V2 employs an encoder-decoder model to embed images as conditions for guiding the denoising step, yet its performance remains suboptimal. 
SDSeg utilized the Stable Diffusion model to generate latent representations and further decode them as predicted masks. Additionally, SDSeg proposed a trainable encoder to embed the image into the latent space as the condition for the denoising step. This design does not maximize the use of the knowledge encapsulated in the pre-trained vision foundation model, which may account for its poorer performance. GMS outperforms both CNN-based and transformer-based models, suggesting that generative models when carefully designed can be suitable for a wide variety of segmentation tasks."
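As a reference for how the DSC and IoU numbers above are computed, here is a minimal Python sketch for binary masks; this is an illustrative implementation rather than the authors' evaluation code, and the HD95 metric (95th-percentile Hausdorff distance) is typically taken from a library such as MedPy.

import numpy as np

def dice_and_iou(pred: np.ndarray, gt: np.ndarray, eps: float = 1e-8):
    # DSC and IoU (in percent) for binary masks of identical shape.
    pred, gt = pred.astype(bool), gt.astype(bool)
    inter = np.logical_and(pred, gt).sum()
    dsc = 100.0 * 2.0 * inter / (pred.sum() + gt.sum() + eps)
    iou = 100.0 * inter / (np.logical_or(pred, gt).sum() + eps)
    return dsc, iou

# A perfect prediction scores DSC = IoU = 100 (up to eps).
mask = np.zeros((256, 256), dtype=np.uint8)
mask[64:128, 64:128] = 1
print(dice_and_iou(mask, mask))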
},
{
"section_id": "4.4",
"parent_section_id": "4",
"section_name": "Domain Generalization Ability",
"text": "We evaluated all models on their ability to segment images within the same modality but collected as a part of different datasets to demonstrate model domain generalization ability. Specifically, we train the model using the training set from one dataset but evaluate the performance on a different dataset. This experiment was performed with the BUS and BUSI datasets interchangeably as training and test sets since they are the same modalities (breast ultrasound) but acquired from different centers and vendors. Therefore, the data distributions of the training and test sets are not aligned. Quantitative results are shown in Table 3 ###reference_###, where GMS outperforms all other models in terms of DSC and HD95. In particular, nnUNet demonstrates powerful domain generalization abilities on both datasets, thanks to its network architecture and the use of data augmentation techniques. However, GMS surpasses nnUNet by around 2% for DSC. We also recruited two domain generalization methods (MixStyle and DSU) for comparison. MixStyle and DSU were implemented based on DeepLab-V3 (Chen et al. 2017 ###reference_b5###) and employed the ResNet50 (He et al. 2016 ###reference_b8###) as the encoder. GMS is better than two domain generalization methods, which demonstrates the powerful domain generalization ability of our model. The improvements achieved by our model are likely due to the latent representations derived from the pre-trained large vision model, which are domain-agnostic as it was trained on a large, general-purpose dataset. Additionally, GMS has fewer trainable parameters compared to the other generative models, which further reduces the likelihood of overfitting the model to the training set."
},
{
"section_id": "4.5",
"parent_section_id": "4",
"section_name": "Qualitative Segmentation Results",
"text": "Qualitative segmentation results for different models are shown in Figure 2 ###reference_###. The yellow and green lines denote the contours of predictions and ground truth, respectively. In images acquired from the BUS and BUSI datasets (top two rows), breast lesions show regular shapes, but the segmentation results of other models are often irregular causing over- or under-segmentation. GMS shows the most consistent results compared with the ground truth, which also proves the superiority of GMS. For the histology and dermatology images (the third and fourth rows), there are some regions with highly similar appearances to the target area, which leads to false positive segmentation results in the CNN and transformer-based models. However, GMS is still able to accurately segment those images with complex or misleading patterns. For the endoscopic image (last row), generative methods (GSS and our approach) give the most accurate predictions, which demonstrates the advantages of employing large pre-trained vision models for the segmentation task."
},
{
"section_id": "4.6",
"parent_section_id": "4",
"section_name": "Sensitivity Analysis",
"text": ""
},
{
"section_id": "4.6.1",
"parent_section_id": "4.6",
"section_name": "Ablation Studies on Loss Function.",
"text": "We performed an ablation study on different loss function combinations for BUSI, HAM10000, and Kvasir-Instrument datasets. As shown in Table 4 ###reference_###, the compound loss ( + ) always has the best segmentation performance regardless of dataset size or modality. Interestingly, different datasets have different supervision preferences. GMS using only for model training performs better on BUSI and HAM10000 datasets, which implies supervision in the latent space is more effective compared to the image space. However, GMS performance is better for when training on the Kvasir-Instrument dataset, indicating supervision in the image space is more important. The compound loss having the best performance suggests that supervision in the image and latent space are both important for achieving the best performance."
},
{
"section_id": "4.6.2",
"parent_section_id": "4.6",
"section_name": "Image Tokenizer Effectiveness.",
"text": "We evaluated two pre-trained image tokenizers to assess their performances across three datasets. VQ-VAE (Van Den Oord, Vinyals et al. 2017 ###reference_b29###) is a variant of VAE, incorporating a vector quantization step to generate discrete latent representations. Table 5 ###reference_### displays the results using VQ-VAE and SD-VAE (default tokenizer used in GMS) as the image tokenizers. SD-VAE improves DSC by up to 2.2% and reduces the HD95 by up to 5.07, indicating that SD-VAE is more suitable for image tokenization compared to VQ-VAE. The performance improvements also affirm the appropriateness of SD-VAE for handling diverse image segmentation tasks."
},
{
"section_id": "5",
"parent_section_id": null,
"section_name": "Conclusion",
"text": "We presented Generative Medical Segmentation (GMS) to perform medical image segmentation. Unlike other segmentation methods where a discriminative model is trained, GMS leverages a powerful pre-trained vision foundation model, Stable Diffusion Variational Autoencoder (SD-VAE), to obtain latent representations of both images and masks. Next, our novel lightweight latent mapping model learns a mapping function from image latent representations to mask latent representations, enabling predicted latent representations of the mask to be generated from an image. Finally, the pre-trained model generates pixel-wise segmentation predictions in the image space using the predicted latent representation of the mask. Experiments on five datasets show that GMS outperforms the state-of-the-art discriminative segmentation models such as ACC-UNet. Moreover, the domain generalization ability of GMS is stronger than other domain generalization models, like DSU and MixStyle, due to the domain-agnostic latent embedding space used by GMS. One key limitation is that GMS can only segment 2D medical images, due to the currently used pre-trained image tokenizer SD-VAE being only trained on 2D natural images. In the future, we will explore extending GMS to 3D medical images by selecting an appropriate pre-trained model for 3D images and adapting the latent mapping model to work on its latent representations."
}
],
"appendix": [],
"tables": {
"1": {
"table_html": "<figure class=\"ltx_table\" id=\"Sx3.T1\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Quantitative segmentation performance on two ultrasound datasets. The best and second-best performances are bold and underlined, respectively. <sup class=\"ltx_sup\" id=\"Sx3.T1.11.1\">\u2020</sup> indicates fewer trainable parameters than GMS.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"Sx3.T1.9\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.8.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.8.1.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"Sx3.T1.9.8.1.1.1\">Type</span></th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.8.1.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"Sx3.T1.9.8.1.2.1\">Model</span></th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.8.1.3\">Trainable</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" colspan=\"3\" id=\"Sx3.T1.9.8.1.4\">BUS</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"3\" id=\"Sx3.T1.9.8.1.5\">BUSI</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.8.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.8.6.7\">Params (M)</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.3.1.1\">DSC\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.4.2.2\">IoU\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx3.T1.5.3.3\">HD95\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.6.4.4\">DSC\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.7.5.5\">IoU\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.8.6.6\">HD95\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.9.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.9.2.1\" rowspan=\"5\"><span class=\"ltx_text\" id=\"Sx3.T1.9.9.2.1.1\">CNN</span></th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.9.2.2\">UNet</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.9.2.3\">14.0</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.9.2.4\">81.50</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.9.2.5\">70.77</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.9.2.6\">17.68</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.9.2.7\">72.27</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.9.2.8\">63.00</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.9.2.9\">35.42</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.10.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.10.3.1\">MultiResUNet</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.10.3.2\">7.3</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.10.3.3\">80.41</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.10.3.4\">70.33</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T1.9.10.3.5\">19.22</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.10.3.6\">72.43</td>\n<td class=\"ltx_td ltx_align_center\" 
id=\"Sx3.T1.9.10.3.7\">62.59</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.10.3.8\">34.19</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.11.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.11.4.1\">ACC-UNet</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.11.4.2\">16.8</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.11.4.3\">83.40</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.11.4.4\">73.51</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T1.9.11.4.5\">16.49</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.11.4.6\">77.19</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.11.4.7\">68.51</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.11.4.8\">25.49</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.12.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.12.5.1\">nnUNet</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.12.5.2\">20.6</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.12.5.3\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T1.9.12.5.3.1\">85.71</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.12.5.4\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T1.9.12.5.4.1\">78.68</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T1.9.12.5.5\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T1.9.12.5.5.1\">11.43</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.12.5.6\">79.45</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.12.5.7\">70.99</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.12.5.8\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T1.9.12.5.8.1\">22.13</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.7.1\">EGE-UNet<sup class=\"ltx_sup\" id=\"Sx3.T1.9.7.1.1\">\u2020</sup>\n</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.7.2\">0.05</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.7.3\">72.79</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.7.4\">61.96</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T1.9.7.5\">27.73</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.7.6\">75.17</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.7.7\">60.23</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.7.8\">29.51</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.13.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.13.6.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"Sx3.T1.9.13.6.1.1\">Transformer</span></th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.13.6.2\">SwinUNet</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.13.6.3\">27.2</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.13.6.4\">80.37</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.13.6.5\">69.75</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.13.6.6\">20.49</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.13.6.7\">76.06</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.13.6.8\">66.10</td>\n<td class=\"ltx_td 
ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.13.6.9\">28.69</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.14.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.14.7.1\">SME-SwinUNet</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.14.7.2\">169.8</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.14.7.3\">78.87</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.14.7.4\">67.13</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T1.9.14.7.5\">22.19</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.14.7.6\">73.93</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.14.7.7\">62.70</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.14.7.8\">30.45</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.15.8\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.15.8.1\">UCTransNet</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.15.8.2\">66.4</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.15.8.3\">83.44</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.15.8.4\">73.74</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T1.9.15.8.5\">16.33</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.15.8.6\">76.55</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.15.8.7\">67.50</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.15.8.8\">25.46</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.16.9\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.16.9.1\" rowspan=\"4\"><span class=\"ltx_text\" id=\"Sx3.T1.9.16.9.1.1\">Generative</span></th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.16.9.2\">MedSegDiff-V2</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.16.9.3\">129.4</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.16.9.4\">83.23</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.16.9.5\">74.36</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.16.9.6\">17.02</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.16.9.7\">71.32</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.16.9.8\">62.73</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T1.9.16.9.9\">38.47</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.17.10\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.17.10.1\">SDSeg</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.17.10.2\">329.0</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.17.10.3\">82.47</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.17.10.4\">73.45</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T1.9.17.10.5\">20.53</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.17.10.6\">72.76</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.17.10.7\">63.52</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.17.10.8\">36.79</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.18.11\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.18.11.1\">GSS</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T1.9.18.11.2\">49.8</th>\n<td class=\"ltx_td ltx_align_center\" 
id=\"Sx3.T1.9.18.11.3\">84.86</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.18.11.4\">77.58</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T1.9.18.11.5\">22.42</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.18.11.6\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T1.9.18.11.6.1\">79.56</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.18.11.7\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T1.9.18.11.7.1\">71.22</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T1.9.18.11.8\">28.20</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T1.9.19.12\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.19.12.1\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T1.9.19.12.1.1\">GMS (Ours)</span></th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.19.12.2\">1.5</th>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx3.T1.9.19.12.3\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T1.9.19.12.3.1\">88.42</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx3.T1.9.19.12.4\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T1.9.19.12.4.1\">80.56</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"Sx3.T1.9.19.12.5\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T1.9.19.12.5.1\">6.79</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx3.T1.9.19.12.6\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T1.9.19.12.6.1\">81.43</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx3.T1.9.19.12.7\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T1.9.19.12.7.1\">72.58</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx3.T1.9.19.12.8\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T1.9.19.12.8.1\">19.50</span></td>\n</tr>\n</tbody>\n</table>\n</figure>",
"capture": "Table 1: Quantitative segmentation performance on two ultrasound datasets. The best and second-best performances are bold and underlined, respectively. \u2020 indicates fewer trainable parameters than GMS."
},
"2": {
"table_html": "<figure class=\"ltx_table\" id=\"Sx3.T2\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Quantitative segmentation performance on three medical datasets of different modalities. The best and second-best performances are bold and underlined, respectively. <sup class=\"ltx_sup\" id=\"Sx3.T2.14.1\">\u2020</sup> indicates fewer trainable parameters than GMS.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"Sx3.T2.12\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.11.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.11.1.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"Sx3.T2.12.11.1.1.1\">Type</span></th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.11.1.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"Sx3.T2.12.11.1.2.1\">Model</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" colspan=\"3\" id=\"Sx3.T2.12.11.1.3\">GlaS</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" colspan=\"3\" id=\"Sx3.T2.12.11.1.4\">HAM10000</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"3\" id=\"Sx3.T2.12.11.1.5\">Kvasir-Instrument</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.11.9\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.3.1.1\">DSC\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.4.2.2\">IoU\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx3.T2.5.3.3\">HD95\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.6.4.4\">DSC\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.7.5.5\">IoU\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx3.T2.8.6.6\">HD95\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.9.7.7\">DSC\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.10.8.8\">IoU\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.11.9.9\">HD95\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.12.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.12.2.1\" rowspan=\"5\"><span class=\"ltx_text\" id=\"Sx3.T2.12.12.2.1.1\">CNN</span></th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.12.2.2\">UNet</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.12.2.3\">87.99</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.12.2.4\">80.01</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.12.2.5\">18.45</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.12.2.6\">92.24</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.12.2.7\">86.93</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.12.2.8\">13.74</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.12.2.9\">93.82</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.12.2.10\">89.23</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.12.2.11\">8.71</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.13.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T2.12.13.3.1\">MultiResUNet</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.13.3.2\">88.34</td>\n<td 
class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.13.3.3\">80.34</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.13.3.4\">17.42</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.13.3.5\">92.74</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.13.3.6\">87.60</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.13.3.7\">13.02</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.13.3.8\">92.31</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.13.3.9\">87.03</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.13.3.10\">9.49</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.14.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T2.12.14.4.1\">ACC-UNet</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.14.4.2\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T2.12.14.4.2.1\">88.60</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.14.4.3\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T2.12.14.4.3.1\">80.84</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.14.4.4\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T2.12.14.4.4.1\">17.14</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.14.4.5\">93.20</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.14.4.6\">88.44</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.14.4.7\">10.83</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.14.4.8\">93.91</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.14.4.9\">89.73</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.14.4.10\">8.74</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.15.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T2.12.15.5.1\">nnUNet</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.15.5.2\">87.25</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.15.5.3\">78.24</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.15.5.4\">20.07</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.15.5.5\">93.83</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.15.5.6\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T2.12.15.5.6.1\">89.32</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.15.5.7\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T2.12.15.5.7.1\">9.43</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.15.5.8\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T2.12.15.5.8.1\">93.95</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.15.5.9\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T2.12.15.5.9.1\">90.20</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.15.5.10\">8.51</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.10\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T2.12.10.1\">EGE-UNet<sup class=\"ltx_sup\" id=\"Sx3.T2.12.10.1.1\">\u2020</sup>\n</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.10.2\">83.25</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.10.3\">71.31</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.10.4\">28.79</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.10.5\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T2.12.10.5.1\">93.90</span></td>\n<td class=\"ltx_td 
ltx_align_center\" id=\"Sx3.T2.12.10.6\">88.50</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.10.7\">10.01</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.10.8\">92.65</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.10.9\">86.30</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.10.10\">9.04</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.16.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.16.6.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"Sx3.T2.12.16.6.1.1\">Transformer</span></th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.16.6.2\">SwinUNet</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.16.6.3\">86.44</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.16.6.4\">76.89</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.16.6.5\">19.63</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.16.6.6\">93.51</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.16.6.7\">88.68</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.16.6.8\">10.46</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.16.6.9\">92.02</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.16.6.10\">85.83</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.16.6.11\">9.15</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.17.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T2.12.17.7.1\">SME-SwinUNet</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.17.7.2\">83.72</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.17.7.3\">72.77</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.17.7.4\">26.23</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.17.7.5\">92.71</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.17.7.6\">87.21</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.17.7.7\">12.53</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.17.7.8\">93.32</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.17.7.9\">88.27</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.17.7.10\">8.91</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.18.8\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T2.12.18.8.1\">UCTransNet</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.18.8.2\">87.17</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.18.8.3\">78.80</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.18.8.4\">20.79</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.18.8.5\">93.45</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.18.8.6\">88.73</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.18.8.7\">10.91</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.18.8.8\">93.27</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.18.8.9\">88.48</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.18.8.10\">8.84</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.19.9\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.19.9.1\" rowspan=\"4\"><span class=\"ltx_text\" id=\"Sx3.T2.12.19.9.1.1\">Generative</span></th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row 
ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.19.9.2\">MedSegDiff-V2</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.19.9.3\">86.82</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.19.9.4\">77.05</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.19.9.5\">19.96</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.19.9.6\">92.28</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.19.9.7\">87.02</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.19.9.8\">13.02</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.19.9.9\">92.29</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.19.9.10\">87.21</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx3.T2.12.19.9.11\">9.06</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.20.10\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T2.12.20.10.1\">SDSeg</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.20.10.2\">86.76</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.20.10.3\">76.23</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.20.10.4\">21.41</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.20.10.5\">92.54</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.20.10.6\">87.53</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.20.10.7\">12.29</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.20.10.8\">91.23</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.20.10.9\">86.54</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.20.10.10\">9.38</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.21.11\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx3.T2.12.21.11.1\">GSS</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.21.11.2\">87.41</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.21.11.3\">79.17</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.21.11.4\">19.81</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.21.11.5\">92.92</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.21.11.6\">87.98</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx3.T2.12.21.11.7\">11.29</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.21.11.8\">93.66</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.21.11.9\">89.15</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx3.T2.12.21.11.10\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T2.12.21.11.10.1\">7.25</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx3.T2.12.22.12\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.22.12.1\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T2.12.22.12.1.1\">GMS (Ours)</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx3.T2.12.22.12.2\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T2.12.22.12.2.1\">88.98</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx3.T2.12.22.12.3\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T2.12.22.12.3.1\">81.16</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.22.12.4\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T2.12.22.12.4.1\">16.32</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" 
id=\"Sx3.T2.12.22.12.5\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T2.12.22.12.5.1\">94.11</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx3.T2.12.22.12.6\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T2.12.22.12.6.1\">89.68</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"Sx3.T2.12.22.12.7\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T2.12.22.12.7.1\">9.32</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx3.T2.12.22.12.8\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T2.12.22.12.8.1\">94.24</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx3.T2.12.22.12.9\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx3.T2.12.22.12.9.1\">90.02</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx3.T2.12.22.12.10\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx3.T2.12.22.12.10.1\">7.03</span></td>\n</tr>\n</tbody>\n</table>\n</figure>",
"capture": "Table 2: Quantitative segmentation performance on three medical datasets of different modalities. The best and second-best performances are bold and underlined, respectively. \u2020 indicates fewer trainable parameters than GMS."
},
"3": {
"table_html": "<figure class=\"ltx_table\" id=\"Sx4.T3\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Quantitative performance for domain generalization segmentation. A to B indicates A for training and B for testing. Best and second-best performances are bold and underlined, respectively. <sup class=\"ltx_sup\" id=\"Sx4.T3.9.1\">\u2020</sup> indicates fewer trainable parameters than GMS.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"Sx4.T3.7\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.6.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx4.T3.7.6.1.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"Sx4.T3.7.6.1.1.1\">Model</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" colspan=\"2\" id=\"Sx4.T3.7.6.1.2\">BUSI to BUS</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"2\" id=\"Sx4.T3.7.6.1.3\">BUS to BUSI</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.6.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.3.1.1\">DSC\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx4.T3.4.2.2\">HD95\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.5.3.3\">DSC\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.6.4.4\">HD95\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.7.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx4.T3.7.7.2.1\">UNet</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.7.7.2.2\">62.99</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx4.T3.7.7.2.3\">47.26</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.7.7.2.4\">53.83</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.7.7.2.5\">96.81</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.8.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T3.7.8.3.1\">MultiResUNet</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.8.3.2\">61.53</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx4.T3.7.8.3.3\">53.97</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.8.3.4\">56.25</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.8.3.5\">94.31</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.9.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T3.7.9.4.1\">ACC-UNet</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.9.4.2\">64.60</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx4.T3.7.9.4.3\">42.87</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.9.4.4\">47.80</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.9.4.5\">135.24</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.10.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T3.7.10.5.1\">nnUNet</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.10.5.2\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx4.T3.7.10.5.2.1\">78.39</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx4.T3.7.10.5.3\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx4.T3.7.10.5.3.1\">20.53</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.10.5.4\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx4.T3.7.10.5.4.1\">59.13</span></td>\n<td class=\"ltx_td ltx_align_center\" 
id=\"Sx4.T3.7.10.5.5\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"Sx4.T3.7.10.5.5.1\">89.32</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T3.7.5.1\">EGE-UNet<sup class=\"ltx_sup\" id=\"Sx4.T3.7.5.1.1\">\u2020</sup>\n</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.5.2\">69.04</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx4.T3.7.5.3\">34.63</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.5.4\">54.46</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.5.5\">105.23</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.11.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx4.T3.7.11.6.1\">SwinUNet</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.7.11.6.2\">78.38</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx4.T3.7.11.6.3\">21.94</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.7.11.6.4\">57.47</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.7.11.6.5\">91.63</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.12.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T3.7.12.7.1\">SME-SwinUNet</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.12.7.2\">74.78</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx4.T3.7.12.7.3\">25.81</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.12.7.4\">58.28</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.12.7.5\">91.26</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.13.8\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T3.7.13.8.1\">UCTransNet</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.13.8.2\">72.76</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx4.T3.7.13.8.3\">28.47</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.13.8.4\">56.94</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.13.8.5\">94.32</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.14.9\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx4.T3.7.14.9.1\">MixStyle</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.7.14.9.2\">73.07</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx4.T3.7.14.9.3\">26.52</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.7.14.9.4\">57.97</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.7.14.9.5\">93.54</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.15.10\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T3.7.15.10.1\">DSU</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.15.10.2\">66.15</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx4.T3.7.15.10.3\">40.03</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.15.10.4\">56.70</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.15.10.5\">95.31</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.16.11\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx4.T3.7.16.11.1\">MedSegDiff-V2</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.7.16.11.2\">69.56</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx4.T3.7.16.11.3\">32.51</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T3.7.16.11.4\">55.21</td>\n<td class=\"ltx_td ltx_align_center 
ltx_border_t\" id=\"Sx4.T3.7.16.11.5\">98.57</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.17.12\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T3.7.17.12.1\">SDSeg</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.17.12.2\">74.03</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx4.T3.7.17.12.3\">26.32</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.17.12.4\">57.03</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.17.12.5\">94.61</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.18.13\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T3.7.18.13.1\">GSS</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.18.13.2\">68.74</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx4.T3.7.18.13.3\">35.74</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.18.13.4\">58.72</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T3.7.18.13.5\">92.57</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T3.7.19.14\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b ltx_border_r ltx_border_t\" id=\"Sx4.T3.7.19.14.1\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T3.7.19.14.1.1\">GMS (Ours)</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx4.T3.7.19.14.2\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T3.7.19.14.2.1\">80.31</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"Sx4.T3.7.19.14.3\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T3.7.19.14.3.1\">18.55</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx4.T3.7.19.14.4\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T3.7.19.14.4.1\">61.60</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"Sx4.T3.7.19.14.5\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T3.7.19.14.5.1\">85.25</span></td>\n</tr>\n</tbody>\n</table>\n</figure>",
"capture": "Table 3: Quantitative performance for domain generalization segmentation. A to B indicates A for training and B for testing. Best and second-best performances are bold and underlined, respectively. \u2020 indicates fewer trainable parameters than GMS."
},
"4": {
"table_html": "<figure class=\"ltx_table\" id=\"Sx4.T4\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 4: </span>Quantitative segmentation performance on three datasets for ablation study using different loss functions.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"Sx4.T4.15\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"Sx4.T4.2.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_t\" id=\"Sx4.T4.1.1.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"Sx4.T4.1.1.1.1\"></span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx4.T4.2.2.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"Sx4.T4.2.2.2.1\"></span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" colspan=\"3\" id=\"Sx4.T4.2.2.3\">BUSI</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" colspan=\"3\" id=\"Sx4.T4.2.2.4\">HAM10000</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"3\" id=\"Sx4.T4.2.2.5\">Kvasir</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T4.11.11\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T4.3.3.1\">DSC\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T4.4.4.2\">IoU\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"Sx4.T4.5.5.3\">HD95\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T4.6.6.4\">DSC\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T4.7.7.5\">IoU\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"Sx4.T4.8.8.6\">HD95\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T4.9.9.7\">DSC\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T4.10.10.8\">IoU\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T4.11.11.9\">HD95\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"Sx4.T4.12.12\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_t\" id=\"Sx4.T4.12.12.1\"></th>\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx4.T4.12.12.2\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T4.12.12.3\">80.25</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T4.12.12.4\">71.26</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx4.T4.12.12.5\">21.57</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T4.12.12.6\">93.92</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T4.12.12.7\">89.41</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx4.T4.12.12.8\">9.95</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T4.12.12.9\">92.93</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T4.12.12.10\">88.28</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T4.12.12.11\">10.21</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T4.13.13\">\n<th class=\"ltx_td ltx_th ltx_th_row\" id=\"Sx4.T4.13.13.2\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T4.13.13.1\"></th>\n<td class=\"ltx_td ltx_align_center\" 
id=\"Sx4.T4.13.13.3\">78.75</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T4.13.13.4\">69.87</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx4.T4.13.13.5\">24.78</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T4.13.13.6\">93.64</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T4.13.13.7\">88.99</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"Sx4.T4.13.13.8\">10.27</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T4.13.13.9\">93.00</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T4.13.13.10\">88.47</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T4.13.13.11\">10.68</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T4.15.15\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_b\" id=\"Sx4.T4.14.14.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_b ltx_border_r\" id=\"Sx4.T4.15.15.2\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T4.15.15.3\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T4.15.15.3.1\">81.43</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T4.15.15.4\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T4.15.15.4.1\">72.58</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"Sx4.T4.15.15.5\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T4.15.15.5.1\">19.50</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T4.15.15.6\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T4.15.15.6.1\">94.11</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T4.15.15.7\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T4.15.15.7.1\">89.68</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"Sx4.T4.15.15.8\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T4.15.15.8.1\">9.32</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T4.15.15.9\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T4.15.15.9.1\">94.24</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T4.15.15.10\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T4.15.15.10.1\">90.02</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T4.15.15.11\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T4.15.15.11.1\">7.03</span></td>\n</tr>\n</tbody>\n</table>\n</figure>",
"capture": "Table 4: Quantitative segmentation performance on three datasets for ablation study using different loss functions."
},
"5": {
"table_html": "<figure class=\"ltx_table\" id=\"Sx4.T5\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 5: </span>Quantitative segmentation performance on three datasets using different image tokenizers.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"Sx4.T5.9\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"Sx4.T5.9.10.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx4.T5.9.10.1.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"Sx4.T5.9.10.1.1.1\">Image Tokenizer</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" colspan=\"3\" id=\"Sx4.T5.9.10.1.2\">BUSI</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" colspan=\"3\" id=\"Sx4.T5.9.10.1.3\">HAM10000</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"3\" id=\"Sx4.T5.9.10.1.4\">Kvasir</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T5.9.9\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T5.1.1.1\">DSC\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T5.2.2.2\">IoU\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"Sx4.T5.3.3.3\">HD95\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T5.4.4.4\">DSC\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T5.5.5.5\">IoU\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"Sx4.T5.6.6.6\">HD95\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T5.7.7.7\">DSC\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T5.8.8.8\">IoU\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"Sx4.T5.9.9.9\">HD95\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"Sx4.T5.9.11.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx4.T5.9.11.1.1\">VQ-VAE</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T5.9.11.1.2\">79.23</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T5.9.11.1.3\">70.34</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx4.T5.9.11.1.4\">24.57</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T5.9.11.1.5\">92.77</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T5.9.11.1.6\">87.61</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"Sx4.T5.9.11.1.7\">13.34</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T5.9.11.1.8\">92.47</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T5.9.11.1.9\">88.31</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T5.9.11.1.10\">9.73</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T5.9.12.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_b ltx_border_r\" id=\"Sx4.T5.9.12.2.1\">SD-VAE</th>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T5.9.12.2.2\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T5.9.12.2.2.1\">81.43</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T5.9.12.2.3\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T5.9.12.2.3.1\">72.58</span></td>\n<td class=\"ltx_td 
ltx_align_center ltx_border_b ltx_border_r\" id=\"Sx4.T5.9.12.2.4\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T5.9.12.2.4.1\">19.50</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T5.9.12.2.5\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T5.9.12.2.5.1\">94.11</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T5.9.12.2.6\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T5.9.12.2.6.1\">89.68</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"Sx4.T5.9.12.2.7\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T5.9.12.2.7.1\">9.32</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T5.9.12.2.8\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T5.9.12.2.8.1\">94.24</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T5.9.12.2.9\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T5.9.12.2.9.1\">90.02</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"Sx4.T5.9.12.2.10\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T5.9.12.2.10.1\">7.03</span></td>\n</tr>\n</tbody>\n</table>\n</figure>",
"capture": "Table 5: Quantitative segmentation performance on three datasets using different image tokenizers."
}
},
"image_paths": {
"1": {
"figure_path": "2403.18198v2_figure_1.png",
"caption": "Figure 1: GMS network architecture for 2D medical image segmentation. \u2130\u2130\\mathcal{E}caligraphic_E and \ud835\udc9f\ud835\udc9f\\mathcal{D}caligraphic_D represent a pre-trained vision foundation model and weights are frozen. We utilize the model weights from the Stable Diffusion VAE for \u2130\u2130\\mathcal{E}caligraphic_E and \ud835\udc9f\ud835\udc9f\\mathcal{D}caligraphic_D. The latent mapping model (orange box) contains convolution blocks and self-attention blocks but does not contain down-sampling layers. Such a design helps to preserve the spatial information in the input feature vectors. Here, Conv means the 2D convolution operation, and GN represents the Group Normalization.",
|
| 147 |
+
"url": "http://arxiv.org/html/2403.18198v2/x1.png"
|
| 148 |
+
},
|
| 149 |
+
"2": {
|
| 150 |
+
"figure_path": "2403.18198v2_figure_2.png",
|
| 151 |
+
"caption": "Figure 2: Exemplar segmentation results of GMS and other state-of-the-art methods. From top to bottom are images from the BUS, BUSI, GlaS, HAM10000 and Kvasir-Instrument datasets. The green contours are the ground truth, and the yellow contours are the model predictions. Zoom in for more details.",
|
| 152 |
+
"url": "http://arxiv.org/html/2403.18198v2/x2.png"
|
| 153 |
+
}
|
| 154 |
+
},
|
| 155 |
+
"validation": true,
|
| 156 |
+
"references": [
|
| 157 |
+
{
|
| 158 |
+
"1": {
|
| 159 |
+
"title": "Dataset of breast ultrasound images.",
|
| 160 |
+
"author": "Al-Dhabyani, W.; Gomaa, M.; Khaled, H.; and Fahmy, A. 2020.",
|
| 161 |
+
"venue": "Data in brief, 28: 104863.",
|
| 162 |
+
"url": null
|
| 163 |
+
}
|
| 164 |
+
},
|
| 165 |
+
{
|
| 166 |
+
"2": {
|
| 167 |
+
"title": "Swin-unet: Unet-like pure transformer for medical image segmentation.",
|
| 168 |
+
"author": "Cao, H.; Wang, Y.; Chen, J.; Jiang, D.; Zhang, X.; Tian, Q.; and Wang, M. 2022.",
|
| 169 |
+
"venue": "In European conference on computer vision, 205\u2013218. Springer.",
|
| 170 |
+
"url": null
|
| 171 |
+
}
|
| 172 |
+
},
|
| 173 |
+
{
|
| 174 |
+
"3": {
|
| 175 |
+
"title": "Semi-supervised task-driven data augmentation for medical image segmentation.",
|
| 176 |
+
"author": "Chaitanya, K.; Karani, N.; Baumgartner, C. F.; Erdil, E.; Becker, A.; Donati, O.; and Konukoglu, E. 2021.",
|
| 177 |
+
"venue": "Medical Image Analysis, 68: 101934.",
|
| 178 |
+
"url": null
|
| 179 |
+
}
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"4": {
|
| 183 |
+
"title": "Generative semantic segmentation.",
|
| 184 |
+
"author": "Chen, J.; Lu, J.; Zhu, X.; and Zhang, L. 2023.",
|
| 185 |
+
"venue": "In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 7111\u20137120.",
|
| 186 |
+
"url": null
|
| 187 |
+
}
|
| 188 |
+
},
|
| 189 |
+
{
|
| 190 |
+
"5": {
|
| 191 |
+
"title": "Rethinking atrous convolution for semantic image segmentation.",
|
| 192 |
+
"author": "Chen, L.-C.; Papandreou, G.; Schroff, F.; and Adam, H. 2017.",
|
| 193 |
+
"venue": "arXiv:1706.05587.",
|
| 194 |
+
"url": null
|
| 195 |
+
}
|
| 196 |
+
},
|
| 197 |
+
{
|
| 198 |
+
"6": {
|
| 199 |
+
"title": "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale.",
|
| 200 |
+
"author": "Dosovitskiy, A.; Beyer, L.; Kolesnikov, A.; Weissenborn, D.; Zhai, X.; Unterthiner, T.; Dehghani, M.; Minderer, M.; Heigold, G.; Gelly, S.; Uszkoreit, J.; and Houlsby, N. 2021.",
|
| 201 |
+
"venue": "In International Conference on Learning Representations.",
|
| 202 |
+
"url": null
|
| 203 |
+
}
|
| 204 |
+
},
|
| 205 |
+
{
|
| 206 |
+
"7": {
|
| 207 |
+
"title": "Generative adversarial nets.",
|
| 208 |
+
"author": "Goodfellow, I.; Pouget-Abadie, J.; Mirza, M.; Xu, B.; Warde-Farley, D.; Ozair, S.; Courville, A.; and Bengio, Y. 2014.",
|
| 209 |
+
"venue": "Advances in neural information processing systems, 27.",
|
| 210 |
+
"url": null
|
| 211 |
+
}
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"8": {
|
| 215 |
+
"title": "Deep residual learning for image recognition.",
|
| 216 |
+
"author": "He, K.; Zhang, X.; Ren, S.; and Sun, J. 2016.",
|
| 217 |
+
"venue": "In Proceedings of the IEEE conference on computer vision and pattern recognition, 770\u2013778.",
|
| 218 |
+
"url": null
|
| 219 |
+
}
|
| 220 |
+
},
|
| 221 |
+
{
|
| 222 |
+
"9": {
|
| 223 |
+
"title": "Brain Lesion Synthesis via Progressive Adversarial Variational Auto-Encoder.",
|
| 224 |
+
"author": "Huo, J.; Vakharia, V.; Wu, C.; Sharan, A.; Ko, A.; Ourselin, S.; and Sparks, R. 2022.",
|
| 225 |
+
"venue": "In International Workshop on Simulation and Synthesis in Medical Imaging, 101\u2013111. Springer.",
|
| 226 |
+
"url": null
|
| 227 |
+
}
|
| 228 |
+
},
|
| 229 |
+
{
|
| 230 |
+
"10": {
|
| 231 |
+
"title": "Acc-unet: A completely convolutional unet model for the 2020s.",
|
| 232 |
+
"author": "Ibtehaz, N.; and Kihara, D. 2023.",
|
| 233 |
+
"venue": "In International Conference on Medical Image Computing and Computer-Assisted Intervention, 692\u2013702. Springer.",
|
| 234 |
+
"url": null
|
| 235 |
+
}
|
| 236 |
+
},
|
| 237 |
+
{
|
| 238 |
+
"11": {
|
| 239 |
+
"title": "MultiResUNet: Rethinking the U-Net architecture for multimodal biomedical image segmentation.",
|
| 240 |
+
"author": "Ibtehaz, N.; and Rahman, M. S. 2020.",
|
| 241 |
+
"venue": "Neural networks, 121: 74\u201387.",
|
| 242 |
+
"url": null
|
| 243 |
+
}
|
| 244 |
+
},
|
| 245 |
+
{
|
| 246 |
+
"12": {
|
| 247 |
+
"title": "nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation.",
|
| 248 |
+
"author": "Isensee, F.; Jaeger, P. F.; Kohl, S. A.; Petersen, J.; and Maier-Hein, K. H. 2021.",
|
| 249 |
+
"venue": "Nature methods, 18(2): 203\u2013211.",
|
| 250 |
+
"url": null
|
| 251 |
+
}
|
| 252 |
+
},
|
| 253 |
+
{
|
| 254 |
+
"13": {
|
| 255 |
+
"title": "Kvasir-instrument: Diagnostic and therapeutic tool segmentation dataset in gastrointestinal endoscopy.",
|
| 256 |
+
"author": "Jha, D.; Ali, S.; Emanuelsen, K.; Hicks, S. A.; Thambawita, V.; Garcia-Ceja, E.; Riegler, M. A.; de Lange, T.; Schmidt, P. T.; Johansen, H. D.; et al. 2021.",
|
| 257 |
+
"venue": "In MultiMedia Modeling: 27th International Conference, MMM 2021, Prague, Czech Republic, June 22\u201324, 2021, Proceedings, Part II 27, 218\u2013229. Springer.",
|
| 258 |
+
"url": null
|
| 259 |
+
}
|
| 260 |
+
},
|
| 261 |
+
{
|
| 262 |
+
"14": {
|
| 263 |
+
"title": "Training generative adversarial networks with limited data.",
|
| 264 |
+
"author": "Karras, T.; Aittala, M.; Hellsten, J.; Laine, S.; Lehtinen, J.; and Aila, T. 2020.",
|
| 265 |
+
"venue": "Advances in neural information processing systems, 33: 12104\u201312114.",
|
| 266 |
+
"url": null
|
| 267 |
+
}
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"15": {
|
| 271 |
+
"title": "Auto-encoding variational bayes.",
|
| 272 |
+
"author": "Kingma, D. P.; and Welling, M. 2013.",
|
| 273 |
+
"venue": "arXiv:1312.6114.",
|
| 274 |
+
"url": null
|
| 275 |
+
}
|
| 276 |
+
},
|
| 277 |
+
{
|
| 278 |
+
"16": {
|
| 279 |
+
"title": "Segment anything.",
|
| 280 |
+
"author": "Kirillov, A.; Mintun, E.; Ravi, N.; Mao, H.; Rolland, C.; Gustafson, L.; Xiao, T.; Whitehead, S.; Berg, A. C.; Lo, W.-Y.; et al. 2023.",
|
| 281 |
+
"venue": "In Proceedings of the IEEE/CVF International Conference on Computer Vision, 4015\u20134026.",
|
| 282 |
+
"url": null
|
| 283 |
+
}
|
| 284 |
+
},
|
| 285 |
+
{
|
| 286 |
+
"17": {
|
| 287 |
+
"title": "Breaking the dilemma of medical image-to-image translation.",
|
| 288 |
+
"author": "Kong, L.; Lian, C.; Huang, D.; Hu, Y.; Zhou, Q.; et al. 2021.",
|
| 289 |
+
"venue": "Advances in Neural Information Processing Systems, 34: 1964\u20131978.",
|
| 290 |
+
"url": null
|
| 291 |
+
}
|
| 292 |
+
},
|
| 293 |
+
{
|
| 294 |
+
"18": {
|
| 295 |
+
"title": "Semantic segmentation with generative models: Semi-supervised learning and strong out-of-domain generalization.",
|
| 296 |
+
"author": "Li, D.; Yang, J.; Kreis, K.; Torralba, A.; and Fidler, S. 2021.",
|
| 297 |
+
"venue": "In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 8300\u20138311.",
|
| 298 |
+
"url": null
|
| 299 |
+
}
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"19": {
|
| 303 |
+
"title": "Uncertainty Modeling for Out-of-Distribution Generalization.",
|
| 304 |
+
"author": "Li, X.; Dai, Y.; Ge, Y.; Liu, J.; Shan, Y.; and DUAN, L. 2022.",
|
| 305 |
+
"venue": "In International Conference on Learning Representations.",
|
| 306 |
+
"url": null
|
| 307 |
+
}
|
| 308 |
+
},
|
| 309 |
+
{
|
| 310 |
+
"20": {
|
| 311 |
+
"title": "Stable Diffusion Segmentation for Biomedical Images with Single-step Reverse Process.",
|
| 312 |
+
"author": "Lin, T.; Chen, Z.; Yan, Z.; Zheng, F.; and Yu, W. 2024.",
|
| 313 |
+
"venue": "arXiv:2406.18361.",
|
| 314 |
+
"url": null
|
| 315 |
+
}
|
| 316 |
+
},
|
| 317 |
+
{
|
| 318 |
+
"21": {
|
| 319 |
+
"title": "One model to synthesize them all: Multi-contrast multi-scale transformer for missing data imputation.",
|
| 320 |
+
"author": "Liu, J.; Pasumarthi, S.; Duffy, B.; Gong, E.; Datta, K.; and Zaharchuk, G. 2023.",
|
| 321 |
+
"venue": "IEEE Transactions on Medical Imaging, 42(9): 2577\u20132591.",
|
| 322 |
+
"url": null
|
| 323 |
+
}
|
| 324 |
+
},
|
| 325 |
+
{
|
| 326 |
+
"22": {
|
| 327 |
+
"title": "Decoupled Weight Decay Regularization.",
|
| 328 |
+
"author": "Loshchilov, I.; and Hutter, F. 2019.",
|
| 329 |
+
"venue": "In International Conference on Learning Representations.",
|
| 330 |
+
"url": null
|
| 331 |
+
}
|
| 332 |
+
},
|
| 333 |
+
{
|
| 334 |
+
"23": {
|
| 335 |
+
"title": "High-resolution image synthesis with latent diffusion models.",
|
| 336 |
+
"author": "Rombach, R.; Blattmann, A.; Lorenz, D.; Esser, P.; and Ommer, B. 2022.",
|
| 337 |
+
"venue": "In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 10684\u201310695.",
|
| 338 |
+
"url": null
|
| 339 |
+
}
|
| 340 |
+
},
|
| 341 |
+
{
|
| 342 |
+
"24": {
|
| 343 |
+
"title": "U-net: Convolutional networks for biomedical image segmentation.",
|
| 344 |
+
"author": "Ronneberger, O.; Fischer, P.; and Brox, T. 2015.",
|
| 345 |
+
"venue": "In Medical Image Computing and Computer-Assisted Intervention\u2013MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015, Proceedings, Part III 18, 234\u2013241. Springer.",
|
| 346 |
+
"url": null
|
| 347 |
+
}
|
| 348 |
+
},
|
| 349 |
+
{
|
| 350 |
+
"25": {
|
| 351 |
+
"title": "Ege-unet: an efficient group enhanced unet for skin lesion segmentation.",
|
| 352 |
+
"author": "Ruan, J.; Xie, M.; Gao, J.; Liu, T.; and Fu, Y. 2023.",
|
| 353 |
+
"venue": "In International Conference on Medical Image Computing and Computer-Assisted Intervention, 481\u2013490. Springer.",
|
| 354 |
+
"url": null
|
| 355 |
+
}
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"26": {
|
| 359 |
+
"title": "Laion-5b: An open large-scale dataset for training next generation image-text models.",
|
| 360 |
+
"author": "Schuhmann, C.; Beaumont, R.; Vencu, R.; Gordon, C.; Wightman, R.; Cherti, M.; Coombes, T.; Katta, A.; Mullis, C.; Wortsman, M.; et al. 2022.",
|
| 361 |
+
"venue": "Advances in Neural Information Processing Systems, 35: 25278\u201325294.",
|
| 362 |
+
"url": null
|
| 363 |
+
}
|
| 364 |
+
},
|
| 365 |
+
{
|
| 366 |
+
"27": {
|
| 367 |
+
"title": "Gland segmentation in colon histology images: The glas challenge contest.",
|
| 368 |
+
"author": "Sirinukunwattana, K.; Pluim, J. P.; Chen, H.; Qi, X.; Heng, P.-A.; Guo, Y. B.; Wang, L. Y.; Matuszewski, B. J.; Bruni, E.; Sanchez, U.; et al. 2017.",
|
| 369 |
+
"venue": "Medical image analysis, 35: 489\u2013502.",
|
| 370 |
+
"url": null
|
| 371 |
+
}
|
| 372 |
+
},
|
| 373 |
+
{
|
| 374 |
+
"28": {
|
| 375 |
+
"title": "The HAM10000 dataset, a large collection of multi-source dermatoscopic images of common pigmented skin lesions.",
|
| 376 |
+
"author": "Tschandl, P.; Rosendahl, C.; and Kittler, H. 2018.",
|
| 377 |
+
"venue": "Scientific data, 5(1): 1\u20139.",
|
| 378 |
+
"url": null
|
| 379 |
+
}
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"29": {
|
| 383 |
+
"title": "Neural discrete representation learning.",
|
| 384 |
+
"author": "Van Den Oord, A.; Vinyals, O.; et al. 2017.",
|
| 385 |
+
"venue": "Advances in neural information processing systems, 30.",
|
| 386 |
+
"url": null
|
| 387 |
+
}
|
| 388 |
+
},
|
| 389 |
+
{
|
| 390 |
+
"30": {
|
| 391 |
+
"title": "Uctransnet: rethinking the skip connections in u-net from a channel-wise perspective with transformer.",
|
| 392 |
+
"author": "Wang, H.; Cao, P.; Wang, J.; and Zaiane, O. R. 2022a.",
|
| 393 |
+
"venue": "In Proceedings of the AAAI conference on artificial intelligence, volume 36, 2441\u20132449.",
|
| 394 |
+
"url": null
|
| 395 |
+
}
|
| 396 |
+
},
|
| 397 |
+
{
|
| 398 |
+
"31": {
|
| 399 |
+
"title": "SMESwin Unet: Merging CNN and transformer for medical image segmentation.",
|
| 400 |
+
"author": "Wang, Z.; Min, X.; Shi, F.; Jin, R.; Nawrin, S. S.; Yu, I.; and Nagatomi, R. 2022b.",
|
| 401 |
+
"venue": "In International Conference on Medical Image Computing and Computer-Assisted Intervention, 517\u2013526. Springer.",
|
| 402 |
+
"url": null
|
| 403 |
+
}
|
| 404 |
+
},
|
| 405 |
+
{
|
| 406 |
+
"32": {
|
| 407 |
+
"title": "Medsegdiff-v2: Diffusion-based medical image segmentation with transformer.",
|
| 408 |
+
"author": "Wu, J.; Ji, W.; Fu, H.; Xu, M.; Jin, Y.; and Xu, Y. 2024.",
|
| 409 |
+
"venue": "In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, 6030\u20136038.",
|
| 410 |
+
"url": null
|
| 411 |
+
}
|
| 412 |
+
},
|
| 413 |
+
{
|
| 414 |
+
"33": {
|
| 415 |
+
"title": "Automated breast ultrasound lesions detection using convolutional neural networks.",
|
| 416 |
+
"author": "Yap, M. H.; Pons, G.; Marti, J.; Ganau, S.; Sentis, M.; Zwiggelaar, R.; Davison, A. K.; and Marti, R. 2017.",
|
| 417 |
+
"venue": "IEEE journal of biomedical and health informatics, 22(4): 1218\u20131226.",
|
| 418 |
+
"url": null
|
| 419 |
+
}
|
| 420 |
+
},
|
| 421 |
+
{
|
| 422 |
+
"34": {
|
| 423 |
+
"title": "Synthetic augmentation with large-scale unconditional pre-training.",
|
| 424 |
+
"author": "Ye, J.; Ni, H.; Jin, P.; Huang, S. X.; and Xue, Y. 2023.",
|
| 425 |
+
"venue": "In International Conference on Medical Image Computing and Computer-Assisted Intervention, 754\u2013764. Springer.",
|
| 426 |
+
"url": null
|
| 427 |
+
}
|
| 428 |
+
},
|
| 429 |
+
{
|
| 430 |
+
"35": {
|
| 431 |
+
"title": "Mixstyle neural networks for domain generalization and adaptation.",
|
| 432 |
+
"author": "Zhou, K.; Yang, Y.; Qiao, Y.; and Xiang, T. 2023.",
|
| 433 |
+
"venue": "International Journal of Computer Vision, 1\u201315.",
|
| 434 |
+
"url": null
|
| 435 |
+
}
|
| 436 |
+
}
|
| 437 |
+
],
|
| 438 |
+
"url": "http://arxiv.org/html/2403.18198v2"
|
| 439 |
+
}
|
20240819/2404.09937v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
20240819/2404.12965v2.json
ADDED
|
@@ -0,0 +1,203 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "A note on higher-order and nonlinear limiting approaches for continuously bounds-preserving discontinuous Galerkin methods",
|
| 3 |
+
"abstract": "In (Dzanic, J. Comp. Phys., 508:113010, 2024), a limiting approach for high-order discontinuous Galerkin schemes was introduced which allowed for imposing constraints on the solution continuously (i.e., everywhere within the element). While exact for linear constraint functionals, this approach only imposed a sufficient (but not the minimum necessary) amount of limiting for nonlinear constraint functionals. This short note shows how this limiting approach can be extended to allow exactness for general nonlinear quasiconcave constraint functionals through a nonlinear limiting procedure, reducing unnecessary numerical dissipation. Some examples are shown for nonlinear pressure and entropy constraints in the compressible gas dynamics equations, where both analytic and iterative approaches are used.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "The use of high-order discontinuous Galerkin (DG) schemes for simulating transport-dominated physics often requires additional numerical stabilization to ensure the discrete solution is structure-preserving (i.e., is well-behaved and satisfies physical constraints). One such way of achieving this desirable behavior is applying some form of a posteriori limiting on the solution, where the high-order DG approximation is blended with a secondary, more robust (but generally less accurate) approximation to yield a bounds-preserving (i.e., constraint-satisfying) solution, a few examples of which are presented in the following references [1 ###reference_b1###, 2 ###reference_b2###, 3 ###reference_b3###, 4 ###reference_b4###, 5 ###reference_b5###, 6 ###reference_b6###, 7 ###reference_b7###, 8 ###reference_b8###, 9 ###reference_b9###, 10 ###reference_b10###, 11 ###reference_b11###]. This form of limiting has been widely used for simulating complex physical systems, particularly ones of predominantly hyperbolic nature, where numerical solutions must abide by known physical constraints (e.g., positivity of density and pressure in gas dynamics). However, a potential drawback of these limiting methods is that they are typically performed at discrete nodal locations for the DG solution, which only ensures that the given constraints are satisfied at those points. While adequate for many problems, the use of discretely bounds-preserving limiting may not be robust enough for applications where the DG solution must be evaluated at arbitrary locations (e.g., coupled meshes/solvers, remapping in arbitrary Lagrangian-Eulerian methods, adaptive mesh refinement, etc.). In such scenarios, the limited solution may still violate the constraints at these arbitrary points, causing the failure of the numerical scheme.\nA potential remedy for this problem was introduced in Dzanic [12 ###reference_b12###] through a limiting approach that ensures the limited solution is continuously bounds-preserving (i.e., across the entire solution polynomial). The approach relied on a novel functional for computing the limiting factor for the \u201csqueeze\u201d limiter of Zhang and Shu [1 ###reference_b1###], such that if the minimum of this limiting functional within an element was found (via a spatial optimization algorithm), the limited solution was guaranteed to be continuously bounds-preserving for any arbitrary quasiconcave constraint functional. This approach differed from other continuously bounds-preserving methods which rely on bounded basis functions (e.g., Bernstein polynomials) [3 ###reference_b3###, 13 ###reference_b13###, 5 ###reference_b5###], which may suffer from numerical inefficiencies and excessive dissipation (a more in-depth discussion on these issues is presented in Dzanic [12 ###reference_b12###], Section 1). The underlying mathematical basis of this approach was a linearization of the constraint functionals being enforced on the solution. For linear constraints (e.g., maximum principle on scalar solutions, positivity of density in gas dynamics, etc.), the computed limiting factor was \u201cexact\u201d (i.e., it was the minimum necessary amount of limiting). However, for nonlinear constraint functionals, the linearization could only ensure that the computed limiting factor was sufficient, such that a smaller limiting factor (i.e., less numerical dissipation) could still yield a solution which satisfied the constraints continuously. 
Therefore, the limiting approach was suboptimal for nonlinear constraints commonly encountered in hyperbolic systems of equations as it was not applying only the minimum amount of limiting necessary.\nThe purpose of this short note is to show how this limiting functional can be further modified such that the computed limiting factor is exact for arbitrary nonlinear quasiconcave functionals, reducing unnecessary numerical dissipation. While this proposed modification can be applied to general hyperbolic conservation laws, we focus specifically on the compressible Euler equations which admit higher-order and nonlinear constraints for pressure and entropy. Some examples are shown for the new approach, where both analytic and iterative approaches can be applied."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Preliminaries",
|
| 15 |
+
"text": "In this section, some preliminaries are presented for the proposed modification, which briefly summarize the limiting approach introduced in Dzanic [12 ###reference_b12###]."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Governing equations and constraints",
|
| 21 |
+
"text": "The exemplar hyperbolic conservation law used for this work is the compressible Euler equations for gas dynamics in dimensions, written in conservation form as\nwhere\nHere, is the density, is the momentum, and is the total energy. The symbol denotes the identity matrix in , denotes the velocity, and denotes the pressure, computed (with the assumption of a calorically perfect gas) as\nwhere is the specific heat ratio and is the specific internal energy.\nThe solution of the Euler equations is endowed with a convex invariant set corresponding to the positivity of density (), positivity of pressure/internal energy (), and a minimum principle on the specific physical entropy (), where [14 ###reference_b14###]. This can be represented by the positivity of a set of constraint functionals as\nwhere small tolerances (, ) are included for numerical stability purposes. This set of constraints is particularly interesting as they each have a unique mathematical character, consisting of a linear density constraint, a quadratic pressure/internal energy constraint, and a nonlinear entropy constraint. In many limiting approaches, the entropy constraint is neglected and only positivity of density/pressure is enforced (e.g., positivity-preserving limiters)."
|
| 22 |
+
},
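For reference, here is a minimal Python sketch (an illustration for this note, not the paper's code) that evaluates the three constraint functionals above for a single conserved state; the entropy form sigma = p / rho**gamma and all variable names are assumptions made for the example.

```python
import numpy as np

def constraint_functionals(u, gamma=1.4, eps_rho=1e-10, eps_p=1e-10,
                           sigma_min=0.0):
    """Evaluate (g_rho, g_p, g_sigma) for a conserved state
    u = (rho, m_1, ..., m_d, E); all must be >= 0 for admissibility."""
    rho, m, E = u[0], u[1:-1], u[-1]
    p = (gamma - 1.0) * (E - 0.5 * (m @ m) / rho)  # calorically perfect gas
    sigma = p / rho**gamma                         # assumed entropy form
    return rho - eps_rho, p - eps_p, sigma - sigma_min

# Example: a 2D state with rho = 1, v = (0.1, 0), p = 1
u = np.array([1.0, 0.1, 0.0, 1.0 / 0.4 + 0.005])  # E = p/(gamma-1) + KE
print(constraint_functionals(u))
```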
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "Discontinuous Galerkin methods",
|
| 27 |
+
"text": "The underlying numerical method for this approach is the discontinuous Galerkin scheme (see, for example, Hesthaven and Warburton [15 ###reference_b15###]), where the solution within each element of the mesh is represented by a set of polynomial basis functions as\nwhere are the basis functions, are their associated coefficients, and is the piece-wise polynomial space spanned by the basis functions.\nWe consider a semi-discrete weak formulation of a hyperbolic conservation law in the form of\nwhere is a test function and is a numerical interface flux. Furthermore, we define the element-wise mean for an arbitrary element as\nwhich, under some relatively minor assumptions on the numerical scheme, generally preserves convex invariants of hyperbolic systems, such that it may be used as secondary bounds-preserving approximation for a limiting approach (see Zhang and Shu [1 ###reference_b1###], Zhang and Shu [16 ###reference_b16###], and derived works)."
|
| 28 |
+
},
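As a hedged illustration of the element-wise mean defined above, the sketch below computes it from quadrature-point values with Gauss-Legendre weights; the array layout and function name are assumptions of this example, not the paper's implementation.

```python
import numpy as np

def element_mean(u_at_quad, quad_weights):
    """Element-wise mean of a DG solution from quadrature-point values.

    u_at_quad    : (n_quad, n_vars) solution at the quadrature nodes
    quad_weights : (n_quad,) quadrature weights on the reference element
    """
    volume = np.sum(quad_weights)  # |Omega| on the reference element
    return (quad_weights @ u_at_quad) / volume

# Example: a 1D element on [-1, 1] with 4 Gauss-Legendre points
nodes, weights = np.polynomial.legendre.leggauss(4)
u = np.stack([1.0 + 0.3 * nodes,              # density
              np.zeros_like(nodes),           # momentum
              2.5 + 0.1 * nodes**2], axis=1)  # total energy
print(element_mean(u, weights))
```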
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.3",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "Continuously bounds-preserving limiting",
|
| 33 |
+
"text": "The proposed limiting approach in Dzanic [12 ###reference_b12###] relies on the a posteriori \u201csqueeze\u201d limiter of Zhang and Shu [1 ###reference_b1###], which linearly contracts the high-order DG solution towards the element-wise mean based on a limiting factor as\nWe drop the subscript for brevity as this limiter applies to every element in the domain. The goal of the proposed approach is to find a limiting factor such that\nfor any arbitrary quasiconcave constraint functional . This approach differed from standard a posteriori limiting methods for DG schemes in that it enforced constraints continuously instead of at discrete nodal locations (here, is used to refer to all locations within the element). It was shown in Dzanic [12 ###reference_b12###] that if one introduces the modified limiting functional as\nwhere\nthen setting the limiting factor as\nguarantees a limiting scheme that is continuously bounds-preserving.\nIn this approach, one can define \u201cexactness\u201d in the limiting as finding the minimum necessary value of such that Eq. 9 ###reference_### is satisfied, which, for high-order solutions that initially violate the constraints, can be expressed as . However, for nonlinear quasiconcave constraint functionals, the linearization used in the definition of can only ensure that the limiting is sufficient, i.e., ."
|
| 34 |
+
},
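To make the squeeze limiter concrete, here is a small Python sketch for the linear density constraint, where the minimum necessary limiting factor has a closed form. Note that checking only sampled nodal values, as done here for brevity, yields discrete limiting; the paper's continuous guarantee instead minimizes the limiting functional over the whole element.

```python
import numpy as np

def squeeze_limit_density(u_nodes, u_mean, eps=1e-10):
    """Zhang-Shu squeeze limiting toward the element mean for the linear
    density constraint g(u) = rho - eps >= 0.

    For a linear constraint the minimum necessary limiting factor has the
    closed form theta = (rho_mean - eps) / (rho_mean - rho_min).
    u_nodes: (n_pts, n_vars) sampled states; u_mean: (n_vars,) element mean.
    """
    rho_min = u_nodes[:, 0].min()
    rho_mean = u_mean[0]
    if rho_min >= eps:                       # already bounds-preserving
        theta = 1.0
    else:                                    # exact factor for a linear g
        theta = (rho_mean - eps) / (rho_mean - rho_min)
    return u_mean + theta * (u_nodes - u_mean), theta
```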
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Proposed modifications",
|
| 39 |
+
"text": "The primary motivation of this work is to introduce a modification to the definition of such that exactness can also be ensured for nonlinear functionals, reducing unnecessary numerical dissipation in the limiting procedure. Exactness for the limiting factor can be represented as finding the minimum necessary value of such that constraints are satisfied, i.e.,\nwhich is equivalent to finding the spatial maximum of the necessary limiting factor at every point in the element, i.e.,\nTo achieve this, we propose replacing the linearized formulation of in Eq. 10 ###reference_###, denoted by , with a better (nonlinear) approximation of the necessary limiting factor, denoted by , which mimics the above condition as\nIt can then be trivially shown following the proof in Dzanic [12 ###reference_b12###] that setting the limiting factor as Eq. 12 ###reference_### using this modified formulation ensures if the high-order solution is bounds-violating and if the high-order solution is bounds-preserving, achieving exactness for arbitrary nonlinear constraint functionals. Furthermore, it also ensures continuity in as when .\nHowever, this formulation requires solving an intersection problem for the zero contour of the constraint functional. For linear constraints, it can be seen that this reduces to the linearized formulation, i.e., . For quadratic constraints such as pressure, this higher-order limiting approach also admits an analytic solution, where can be computed as the (positive) root of the quadratic equation\nwith the coefficients taking on the values\nfor positive pressure constraints, where . This quadratic formulation is similarly seen in the works of Kuzmin and Lohmann [2 ###reference_b2###] and Zhang and Shu [16 ###reference_b16###]. Similar closed-form expressions can be used for higher-order constraints (e.g., cubic) which admit analytic solutions to the intersection problem in Eq. 13 ###reference_###.\nIf one also wants to ensure exactness for nonlinear constraints such as entropy which do not have an analytic solution to the intersection problem, it can be straightforwardly solved using simple iterative root-bracketing approaches such as the bisection or Illinois method, which is similar to discrete limiting approaches with entropy-based constraints [17 ###reference_b17###]. The quasiconcavity of the constraint functionals ensures that this root-bracketing problem is well-behaved, i.e., the is solution is bounded by and is unique. Furthermore, one can apply a stricter upper bound for the initial bracket by using the linearized formulation in Eq. 11 ###reference_###, further reducing the computational cost."
|
| 40 |
+
},
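A sketch of the analytic pressure case described above: along the blend path between the element mean and the pointwise state, the density-scaled pressure constraint is quadratic in the blend parameter, so its first admissible root can be computed directly. The coefficient assembly below follows from expanding the affine blend and is a reconstruction under stated assumptions; names and tolerances are illustrative.

```python
import numpy as np

def pressure_blend_root(u_bar, u_x, gamma=1.4, eps=1e-10):
    """First admissible blend parameter for the pressure constraint at a
    point x. Along w(t) = u_bar + t*(u_x - u_bar), the density-scaled
    constraint G(t) = (gamma-1)*(rho*E - |m|^2/2) - eps*rho is quadratic
    in t, with G(0) = rho_bar*(p_bar - eps) > 0. Returns the first root
    in (0, 1], or 1.0 if the unlimited state already satisfies p >= eps.
    """
    du = u_x - u_bar
    rb, mb, Eb = u_bar[0], u_bar[1:-1], u_bar[-1]
    dr, dm, dE = du[0], du[1:-1], du[-1]
    g = gamma - 1.0
    a = g * (dr * dE - 0.5 * (dm @ dm))
    b = g * (rb * dE + dr * Eb - mb @ dm) - eps * dr
    c = g * (rb * Eb - 0.5 * (mb @ mb)) - eps * rb  # = rho_bar*(p_bar - eps)
    coeffs = [a, b, c] if abs(a) > 1e-14 else [b, c]  # nearly-linear fallback
    roots = [r.real for r in np.roots(coeffs)
             if abs(r.imag) < 1e-12 and 0.0 < r.real <= 1.0]
    return min(roots) if roots else 1.0
```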
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Implementation",
|
| 45 |
+
"text": "The modified functional was implemented within the same numerical framework as in Dzanic [12 ###reference_b12###] and closely follows the original work, using identical parameters and optimization approaches. This approach is summarized as follows, but for more information, the reader is referred to Section 3 of Dzanic [12 ###reference_b12###]. The solution was first converted into a modal basis in monomial form for fast evaluation at arbitrary locations. The initial guess for constraint functional for the optimization process was evaluated at the solution nodes (e.g., Gauss\u2013Lobatto nodes) as well as the volume/surface quadrature nodes (e.g., Gauss\u2013Legendre nodes), the minimum of which was taken as the initial point for optimization. We note that the distinction between solution nodes and quadrature nodes is not necessarily present in collocated flux reconstruction schemes with closed solution nodes, but we evaluate at these nodes regardless to mimic standard nodal DG approaches. Optimization was performed using two iterations of the Newton\u2013Raphson method with a fallback on adaptive gradient descent with backtracking line search in regions where Newton\u2013Raphson is ill-suited. The Jacobian and Hessian of the constraint functionals were computed numerically, and a bound on the true minimum of the constraint functional was extrapolated at the end of the optimization steps. For the gas dynamics equations, the constraints were enforced sequentially, first on density, then pressure, then entropy, to ensure that the constraint functionals were well-behaved. Minimum density and pressure values were set as . A minimum tolerance was set as , and the limiting factor was explicitly set as when to avoid numerically undefined behavior.\nSome further implementation details are necessary for the proposed modification to the approach. For pressure constraints, it is necessary to compute as the root of the quadratic polynomial. It can be shown that in locations where (i.e., where ) and under the assumption that , the discriminant of the quadratic equation is positive and only one of the roots, given by\nyields a limiting factor . In practice, care must be taken to avoid numerical round-off errors in calculating this root, namely by enforcing a floor value of zero for the discriminant. The edge case of is taken care of in a similar manner to the edge case of , where the limiting factor is explicitly set as when . Therefore, for computational purposes, the limiting factor is computed as\nFor the entropy constraints, 5 iterations of the Illinois method were used to compute in locations where . The initial lower (bounds-violating) bracket was set as with , and the upper (bounds-preserving) bracket was set as with (recall that the linearized formulation always yields an upper bound on the exact limiting factor necessary). At each iteration, the next guess was computed as\nand the brackets were updated as\nif and\nif . At the end of the root-finding process, the solution was set to the upper bound, i.e., , to ensure that the bounds-preserving estimate of the limiting factor was chosen."
|
| 46 |
+
},
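The iterative entropy case can be sketched with a compact Illinois-style (modified regula falsi) bracketing routine, mirroring the 5-iteration procedure and the bounds-preserving fallback described above; the exact update in the paper may differ slightly, so treat this as an assumption-laden illustration.

```python
def illinois_theta(g, t_violate, t_preserve, iters=5):
    """Illinois-style bracketing for the zero of a quasiconcave constraint
    g along the blend parameter. One bracket end is bounds-violating
    (g < 0), the other bounds-preserving (g > 0); the preserving end is
    returned so the final estimate stays admissible.
    """
    t_lo, g_lo = t_violate, g(t_violate)    # g_lo < 0 by assumption
    t_hi, g_hi = t_preserve, g(t_preserve)  # g_hi > 0 by assumption
    for _ in range(iters):
        t_new = t_hi - g_hi * (t_hi - t_lo) / (g_hi - g_lo)  # secant step
        g_new = g(t_new)
        if g_new > 0.0:                 # replace the preserving end
            t_hi, g_hi = t_new, g_new
            g_lo *= 0.5                 # Illinois scaling of the stale end
        else:                           # replace the violating end
            t_lo, g_lo = t_new, g_new
            g_hi *= 0.5
    return t_hi                         # bounds-preserving estimate

# Example: constraint with a zero near t ~ 0.67 (purely illustrative)
print(illinois_theta(lambda t: 0.45 - t * t, t_violate=1.0, t_preserve=0.0))
```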
|
| 47 |
+
{
|
| 48 |
+
"section_id": "5",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Results",
|
| 51 |
+
"text": "To highlight the improvements of the proposed modification, we consider first a pathological example of limiting a static discontinuity within one element, where a discontinuity is placed in the center of the element with the left/right states set as\nA 9th-order polynomial DG approximation was initialized by interpolating the solution on the Gauss\u2013Lobatto nodes spaced along the domain , and constraints were enforced for positivity of density/pressure as well as a minimum entropy principle. An arbitrary minimum entropy value was set as . These conditions yield various properties for the unlimited solution to showcase the behavior of the new approach, with a continuously bounds-preserving density field, a discretely (but not continuously) bounds-preserving pressure field, and a discretely bounds-violating entropy field. The minimum entropy value was chosen to demonstrate the nonlinear limiting approach for bounds-violating solution as well as to mimic a condition encountered in time-dependent simulations of hyperbolic conservation laws, where entropy bounds (which might be computed from the previous time step) are violated by the discrete solution at the next time step [6 ###reference_b6###, 11 ###reference_b11###].\nExact solutionUnlimited solutionLinear limitingNonlinear limiting\n\nAs the density field was already continuously bounds-preserving, the limiter was first applied to the pressure field. A comparison of the linearized formulation and the proposed nonlinear formulation is shown in Fig. 1 ###reference_###. It can be seen that the proposed nonlinear formulation drastically reduces the numerical dissipation in the scheme, with the limited solution significantly closer to the unlimited solution in comparison to the linearized formulation. Furthermore, it can be seen that the new limiting approach is exact in the sense that the minimum pressure of the limited solution was on the order of . Similar results could be seen for the entropy constraints, also shown in Fig. 1 ###reference_###. Due to the stronger nonlinearity of the constraint functional, the linearized approach showed even more unnecessary dissipation than with the pressure constraint, whereas the proposed nonlinear approach was exact and showed a significant reduction in the numerical dissipation.\nA more quantitative evaluation of the proposed modification was performed with a near-vacuum isentropic Euler vortex which possesses an analytic solution for comparison. The problem consists of a smooth vortex on a periodic domain , with the initial solution set as\nThe parameters are set to , , and , which yield a near-vacuum state at the vortex peak with a minimum density of and minimum pressure of . One flow-through of the domain () was computed using both a and approximation using meshes of varying resolution with only positivity-preserving constraints, after which the norm of the pressure error (computed at solution nodes) was compared from the solutions obtained using the original linear limiting approach and the proposed nonlinear limiting approach. The comparisons for both the and approximations are shown in Section 5 ###reference_### and Section 5 ###reference_###, respectively. It can be seen that the proposed modification significantly reduces the overall error in the pressure field, with both approximation orders showing decreases between across the varying levels of mesh resolution. 
These results highlight the reduced numerical dissipation that stems from the modified constraint functional proposed in this note. Furthermore, the imposition of exact pressure constraints (as opposed to a linearized approximation) can have further benefits in terms of the computational cost as the reduced pressure (i.e., reduced speed of sound) can increase the maximum admissible explicit time step for near-vacuum problems. A simple computational cost comparison (at a fixed time step) was performed for the near-vacuum isentropic Euler vortex problem using a approximation with elements on one NVIDIA V100 GPU in terms of the absolute wall clock time. As the added effort of analytically computing was essentially negligible compared to computing , the computational cost of the approach with the proposed modification was effectively identical to the linearized method, both with a computational cost increase of approximately over a discrete limiting approach. When was instead computed numerically (which is not necessary for the pressure constraints used for this example but is simply presented for comparison), the overall computational cost increase in comparison to a discrete limiting method was then . We remark here that the relatively marginal difference between the analytic and numerical nonlinear approaches can largely be attributed to the efficacy of GPU computing for local compute-intensive operations.\nTable: Comparison of the norm of the pressure error using linear and nonlinear limiting for the near-vacuum isentropic Euler vortex after one flow-through of the domain () computed with a approximation.\nTable: Comparison of the norm of the pressure error using linear and nonlinear limiting for the near-vacuum isentropic Euler vortex after one flow-through of the domain () computed with a approximation."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "6",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "Concluding remarks",
|
| 57 |
+
"text": "We proposed an improvement to the continuously bounds-preserving limiting approach presented in Dzanic [12 ###reference_b12###], which allows for the approach to achieve the exact amount of limiting necessary for arbitrary nonlinear quasiconcave constraint functionals as opposed to just a sufficient amount. The modification relies on replacing the linearization in limiting functional with an intersection/root-finding problem, which may be computed analytically for some constraints and numerically for others. Some examples were shown for the compressible Euler equations, showing the reduced numerical dissipation and increased accuracy of the proposed approach."
|
| 58 |
+
}
|
| 59 |
+
],
|
| 60 |
+
"appendix": [],
|
| 61 |
+
"tables": {},
|
| 62 |
+
"image_paths": {},
|
| 63 |
+
"validation": true,
|
| 64 |
+
"references": [
|
| 65 |
+
{
|
| 66 |
+
"1": {
|
| 67 |
+
"title": "On maximum-principle-satisfying high order schemes for scalar conservation laws.",
|
| 68 |
+
"author": "Xiangxiong Zhang and Chi-Wang Shu.",
|
| 69 |
+
"venue": "Journal of Computational Physics, 229(9):3091\u20133120, May 2010.",
|
| 70 |
+
"url": null
|
| 71 |
+
}
|
| 72 |
+
},
|
| 73 |
+
{
|
| 74 |
+
"2": {
|
| 75 |
+
"title": "Synchronized slope limiting in discontinuous Galerkin methods for the equations of gas dynamics.",
|
| 76 |
+
"author": "Dmitri Kuzmin and Christoph Lohmann.",
|
| 77 |
+
"venue": "Technische Universit\u00e4t Dortmund, Fakult\u00e4t f\u00fcr Mathematik, 2016.",
|
| 78 |
+
"url": null
|
| 79 |
+
}
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"3": {
|
| 83 |
+
"title": "High-order local maximum principle preserving (MPP) discontinuous Galerkin finite element method for the transport equation.",
|
| 84 |
+
"author": "R. Anderson, V. Dobrev, Tz. Kolev, D. Kuzmin, M. Quezada de Luna, R. Rieben, and V. Tomov.",
|
| 85 |
+
"venue": "Journal of Computational Physics, 334:102\u2013124, April 2017.",
|
| 86 |
+
"url": null
|
| 87 |
+
}
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"4": {
|
| 91 |
+
"title": "Sparse invariant domain preserving discontinuous Galerkin methods with subcell convex limiting.",
|
| 92 |
+
"author": "Will Pazner.",
|
| 93 |
+
"venue": "Computer Methods in Applied Mechanics and Engineering, 382:113876, August 2021.",
|
| 94 |
+
"url": null
|
| 95 |
+
}
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"5": {
|
| 99 |
+
"title": "Monolithic convex limiting in discontinuous Galerkin discretizations of hyperbolic conservation laws.",
|
| 100 |
+
"author": "Hennes Hajduk.",
|
| 101 |
+
"venue": "Computers & Mathematics with Applications, 87:120\u2013138, April 2021.",
|
| 102 |
+
"url": null
|
| 103 |
+
}
|
| 104 |
+
},
|
| 105 |
+
{
|
| 106 |
+
"6": {
|
| 107 |
+
"title": "Positivity-preserving entropy-based adaptive filtering for discontinuous spectral element methods.",
|
| 108 |
+
"author": "T. Dzanic and F.D. Witherden.",
|
| 109 |
+
"venue": "Journal of Computational Physics, 468:111501, November 2022.",
|
| 110 |
+
"url": null
|
| 111 |
+
}
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"7": {
|
| 115 |
+
"title": "Subcell limiting strategies for discontinuous Galerkin spectral element methods.",
|
| 116 |
+
"author": "Andr\u00e9s M. Rueda-Ram\u00edrez, Will Pazner, and Gregor J. Gassner.",
|
| 117 |
+
"venue": "Computers & Fluids, 247:105627, October 2022.",
|
| 118 |
+
"url": null
|
| 119 |
+
}
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"8": {
|
| 123 |
+
"title": "Analysis on physical-constraint-preserving high-order discontinuous Galerkin method for solving Kapila\u2019s five-equation model.",
|
| 124 |
+
"author": "Fan Zhang and Jian Cheng.",
|
| 125 |
+
"venue": "Journal of Computational Physics, 492:112417, November 2023.",
|
| 126 |
+
"url": null
|
| 127 |
+
}
|
| 128 |
+
},
|
| 129 |
+
{
|
| 130 |
+
"9": {
|
| 131 |
+
"title": "A positivity preserving strategy for entropy stable discontinuous Galerkin discretizations of the compressible Euler and Navier-Stokes equations.",
|
| 132 |
+
"author": "Yimin Lin, Jesse Chan, and Ignacio Tomas.",
|
| 133 |
+
"venue": "Journal of Computational Physics, 475:111850, February 2023.",
|
| 134 |
+
"url": null
|
| 135 |
+
}
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"10": {
|
| 139 |
+
"title": "High-order methods for hypersonic flows with strong shocks and real chemistry.",
|
| 140 |
+
"author": "Ahmad Peyvan, Khemraj Shukla, Jesse Chan, and George Karniadakis.",
|
| 141 |
+
"venue": "Journal of Computational Physics, 490:112310, October 2023.",
|
| 142 |
+
"url": null
|
| 143 |
+
}
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"11": {
|
| 147 |
+
"title": "Positivity-preserving and entropy-bounded discontinuous Galerkin method for the chemically reacting, compressible Euler equations. Part i: The one-dimensional case.",
|
| 148 |
+
"author": "Eric J. Ching, Ryan F. Johnson, and Andrew D. Kercher.",
|
| 149 |
+
"venue": "Journal of Computational Physics, 505:112881, May 2024.",
|
| 150 |
+
"url": null
|
| 151 |
+
}
|
| 152 |
+
},
|
| 153 |
+
{
|
| 154 |
+
"12": {
|
| 155 |
+
"title": "Continuously bounds-preserving discontinuous Galerkin methods for hyperbolic conservation laws.",
|
| 156 |
+
"author": "Tarik Dzanic.",
|
| 157 |
+
"venue": "Journal of Computational Physics, 508:113010, July 2024.",
|
| 158 |
+
"url": null
|
| 159 |
+
}
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"13": {
|
| 163 |
+
"title": "Shock capturing by Bernstein polynomials for scalar conservation laws.",
|
| 164 |
+
"author": "Jan Glaubitz.",
|
| 165 |
+
"venue": "Applied Mathematics and Computation, 363:124593, December 2019.",
|
| 166 |
+
"url": null
|
| 167 |
+
}
|
| 168 |
+
},
|
| 169 |
+
{
|
| 170 |
+
"14": {
|
| 171 |
+
"title": "Maps of convex sets and invariant regions for finite-difference systems of conservation laws.",
|
| 172 |
+
"author": "Hermano Frid.",
|
| 173 |
+
"venue": "Archive for Rational Mechanics and Analysis, 160(3):245\u2013269, November 2001.",
|
| 174 |
+
"url": null
|
| 175 |
+
}
|
| 176 |
+
},
|
| 177 |
+
{
|
| 178 |
+
"15": {
|
| 179 |
+
"title": "Nodal Discontinuous Galerkin Methods.",
|
| 180 |
+
"author": "Jan S. Hesthaven and Tim Warburton.",
|
| 181 |
+
"venue": "Springer New York, 2008.",
|
| 182 |
+
"url": null
|
| 183 |
+
}
|
| 184 |
+
},
|
| 185 |
+
{
|
| 186 |
+
"16": {
|
| 187 |
+
"title": "On positivity-preserving high order discontinuous Galerkin schemes for compressible Euler equations on rectangular meshes.",
|
| 188 |
+
"author": "Xiangxiong Zhang and Chi-Wang Shu.",
|
| 189 |
+
"venue": "Journal of Computational Physics, 229(23):8917\u20138934, November 2011.",
|
| 190 |
+
"url": null
|
| 191 |
+
}
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"17": {
|
| 195 |
+
"title": "Invariant domain preserving discretization-independent schemes and convex limiting for hyperbolic systems.",
|
| 196 |
+
"author": "Jean-Luc Guermond, Bojan Popov, and Ignacio Tomas.",
|
| 197 |
+
"venue": "Computer Methods in Applied Mechanics and Engineering, 347:143\u2013175, April 2019.",
|
| 198 |
+
"url": null
|
| 199 |
+
}
|
| 200 |
+
}
|
| 201 |
+
],
|
| 202 |
+
"url": "http://arxiv.org/html/2404.12965v2"
|
| 203 |
+
}
|
20240819/2405.11715v2.json
ADDED
|
@@ -0,0 +1,167 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Semantic Trajectory Data Mining with LLM-Informed POI Classification",
|
| 3 |
+
"abstract": "Human travel trajectory mining is crucial for transportation systems, enhancing route optimization, traffic management, and the study of human travel patterns. While previous studies have primarily focused on spatial-temporal information, the integration of semantic data has been limited, leading to constraints in efficiency and accuracy. Semantic information, such as activity types inferred from Points of Interest (POI) data, can significantly enhance the quality of trajectory mining. However, integrating these insights is challenging, as many POIs have incomplete feature information, and learning-based POI algorithms require the integrity of datasets to do the classification. In this paper, we introduce a novel pipeline for human travel trajectory mining, annotating GPS trajectories with POIs and visit purpose. Our approach first leverages the strong inferential and comprehension capabilities of large language models (LLMs) to link POI with activity types and then uses a Bayesian-based algorithm to infer activity for each stay point in a trajectory. In our evaluation using the OpenStreetMap POI dataset, our approach achieves a 93.4% accuracy and a 96.1% F-1 score in POI classification, and a 91.7% accuracy with a 92.3% F-1 score in activity inference.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "The rapid development of Internet-of-Thing (IoT) technology and applications facilitates the evolution of intelligent transportation systems and brings a new era of data collection [1 ###reference_b1###, 2 ###reference_b2###]. This flourish yields an abundance of trajectory data from a wide range of connected devices and allows us to explore human travel behavior with additional detail and accuracy. The advent of GPS-enabled mobile devices has revolutionized the tracking of individuals, vehicles, trains, and even animals through the collection of digital traces or trajectories [3 ###reference_b3###, 4 ###reference_b4###, 5 ###reference_b5###, 6 ###reference_b6###], enabling the study of travel behaviors.\nHowever, current GPS-based datasets contain only spatial-temporal information, which limits their ability to fully address the complexities of mobility behavior studies, especially when it comes to understanding human mobility. For instance, these datasets do not capture the travel intentions behind each stay point, as human travel often involves specific purposes and underlying interdependencies. Therefore, to model human mobility patterns more accurately, there is a growing demand to integrate GPS-based trajectory data with semantic information. As pointed out in [7 ###reference_b7###], this integration facilitates applications in traffic management, disease analysis, and human movement studies.\nAn integrated dataset combining location trajectories with semantic information would ideally include not only the spatial-temporal data but also information about nearby Points of Interest (POIs), the purpose of visits, and other contextual details. Such a dataset would provide a more comprehensive view of human mobility patterns, allowing for deeper insights into travel behaviors and motivations.\nTo create such integrated datasets and effectively mine trajectory data, accurate POI classification is crucial. The effectiveness of trajectory mining heavily relies on the accuracy of POI classification, which is also crucial for applications like navigation, local searches, and analyzing human travel patterns [8 ###reference_b8###, 9 ###reference_b9###, 10 ###reference_b10###]. A reliable POI classification algorithm can notably enhance the usability, functionality, and effectiveness of these applications by ensuring the quality of POI data categorization. Despite their importance, existing learning-based classification methods suffer from considerable variability within open-source datasets like OpenStreetMap (OSM) [11 ###reference_b11###], and discrepancies widen when multiple data sources are considered. This inconsistency not only complicates schema alignment but also significantly limits the amount and quality of usable data. Efforts to synchronize these disparate data schema are time-consuming and often result in loss of data granularity and reliability.\nIn this paper, we introduce a data mining frame work to annotate trajectory with semantic information. Compared to existing literature, the main contributions of this paper are:\nWe introduce a novel data mining framework to annotate trajectories with activities, which integrates LLM-based POI classification with a probabilistic activity inference algorithm. 
This semantic annotation of time series data bridges spatial-temporal and natural language analyses in trajectory mining, opening new avenues for mobility research.\nThe proposed framework is adaptable across various regional POI or trajectory datasets without additional training. It effectively handles POI classification even with open-source datasets that have incomplete data, eliminating the need for high data integrity.\nLeveraging the semantic information inferred from POIs provided by LLM outputs, our activity inference algorithm achieves precise point-level inference. To our knowledge, this is the first application of LLM to POI classification."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "II Literature Review",
|
| 15 |
+
"text": "###figure_1###"
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "II-A POI Classification",
|
| 21 |
+
"text": "The classification of POIs involves assigning categories to each observation based on location types (restaurants, schools) or associated activities (shopping, working). Learning-based methods like SVM initially addressed this by leveraging additional textual information, achieving up to 73% accuracy on the Yelp dataset with 20 different categories [12 ###reference_b12###]. Integrating spatial, textual, and property features further enhanced classification, with methods like k-Nearest Neighbors achieving up to 91% top-10 accuracy in the OSM dataset with 14 categories [13 ###reference_b13###].\nAs the number of features expands, assessing feature importance for classification has become crucial. Methods like Linear Discriminant Analysis, kNN, and Random Forest are used to evaluate feature significance in classification tasks [14 ###reference_b14###]. Research shows that feature importance varies by geographical location and category, complicating the creation of a universally effective algorithm for diverse datasets.\nPrevious methods predominantly used learning-based algorithms, improving performance via feature extension and data from external sources. In practice, missing features and the challenge of synchronizing diverse data sources complicate classification. Labeling variability, such as OSM datasets from Egypt that feature names exclusively in Arabic, limits the effectiveness of these algorithms.\nThese challenges highlight the need for more robust and adaptable algorithms. LLMs trained with diverse data including internet content, excel in various applications with their strong generalization and inference capabilities [15 ###reference_b15###]. Our approach utilizes LLMs to address these issues, providing an efficient method for classifying POIs across diverse datasets without requiring complete data or additional feature extensions."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "II-B Activity Inference",
|
| 27 |
+
"text": "Trajectory data from GPS devices documents daily trips as sequences of geo-coordinates, revealing insights into human activities like shopping or dining. However, inferring trip purposes is challenging due to GPS inaccuracies and the vague information in POI datasets [16 ###reference_b16###].\nStudies have used spatial and temporal features from trajectories to infer travel behaviors. While rule-based methods accurately predict mandatory activities like home and work due to strong periodicity, they struggle with complex non-mandatory activities like social visits [17 ###reference_b17###, 18 ###reference_b18###]. To improve inference, some methods integrate POI data, using GNN and Dynamic Bayesian Networks to better predict activity types, though precision remains an issue due to the coarse data [19 ###reference_b19###, 20 ###reference_b20###].\nTo address these issues, our model is designed to accurately infer activity types using point-level POIs, rather than broader zonal data. Leveraging prior knowledge of activity temporal distributions improves accuracy significantly. We overcome these challenges with a Bayesian-based algorithm that incorporates the context of nearby POIs."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "III Methodology",
|
| 33 |
+
"text": "The architecture of our trajectory mining framework is illustrated in Fig. 1 ###reference_###. We first perform POI classification by reformulating the task description and candidate activity types into natural language representations. This information is then combined with the POI observations to determine the three most likely activity types for each POI using LLM. Meanwhile, we engage in prompt engineering to refine the quality of outputs based on the model\u2019s performance. Following this, we integrate stay point [21 ###reference_b21###] information and implement a Bayesian-based inference algorithm to accurately associate each stay point with potential POIs and activity types in the human trajectory data."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.1",
|
| 37 |
+
"parent_section_id": "3",
|
| 38 |
+
"section_name": "III-A Problem Formulation",
|
| 39 |
+
"text": "Given the POIs and stay points from trajectory data, the task is to infer activity types for stay points in the trajectory data, and the mathematical expressions and definitions are provided:\nPOI: The point of interest observations are denoted as , where each observation consists of , indicating the name, longitude, latitude and available features () of the POI.\nTrajectory: The trajectory data traces are denoted as .\nStay Point: For each trajectory of person , the stay points are defined as locations where the person remains stationary for a duration of time. A trajectory could have multiple stay points . Each stay point contains information of Lon,lat, representing the start time, end time, longitude, and latitude of the stay point.\nGiven information of each stay points and POI records , the goal is to annotate each stay point with an activity for the whole trajectory dataset ."
|
| 40 |
+
},
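A minimal sketch of the data structures implied by the formulation above, using Python dataclasses; the field names are assumptions chosen to match the prose (name, longitude, latitude, features; start/end times).

```python
from dataclasses import dataclass, field
from typing import Dict, List

@dataclass
class POI:
    name: str
    lon: float
    lat: float
    features: Dict[str, str] = field(default_factory=dict)  # e.g. {"amenity": "cafe"}

@dataclass
class StayPoint:
    t_start: float  # start time (epoch seconds, an assumption)
    t_end: float    # end time
    lon: float
    lat: float

@dataclass
class Trajectory:
    person_id: str
    stay_points: List[StayPoint] = field(default_factory=list)
```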
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.2",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "III-B POI Classification",
|
| 45 |
+
"text": "###figure_2### Different from learning-based algorithms that derive relationships between POI features and categories from labeled data samples, LLM leverages its powerful inferencing capabilities to respond to open-ended text-based questions without requiring additional training. In our approach, we reframe the classification task into a text-based question format where LLMs excel and design efficient prompts that precisely describe the problem, allowing the LLM to interpret and classify POIs effectively based on the context provided in the prompts.\nAs shown in Fig. 3 ###reference_### as an example, the prompt is composed of three distinct components:\nTask Description: Outlines the POI classification task, detailing input data and the desired output structure. This sets the framework for the LLM\u2019s approach to data analysis and classification.\nCategory Description: Provides a detailed description of each target category, including definitions and examples. This helps guide the LLM in accurately classifying POIs into relevant categories.\nPOI Observation: Describes dataset features and their values in natural language format, ensuring the model could fully understand and process each POI\u2019s characteristics as described in the data.\nNotably, prompt fine-tuning can be utilized to meet specific needs when working with diverse datasets or various category sets, as LLMs are not limited by text input formats. For instance, in the Egyptian POI collection where some POI names are provided only in Arabic, we can enhance the model\u2019s understanding by explicitly stating in the prompt like \u201csome names in the \u2018name\u2019 column are in Arabic.\u201d Additionally, for datasets that only contain POIs from public spaces, we can point out the source of the dataset in the prompt, and amenities like \u201ctoilets\u201d will not be classified as \u201chome activity.\u201d Moreover, when dealing with real-world datasets where features often missing such as a POI labeled only with the amenity type \u201cvisitor parking\u201d, it can be challenging to determine whether this POI belongs to the category \u201cvisit friends\u201d or \u201cpick up/drop off.\u201d For such cases, we can refine the prompt to guide the LLM in identifying the three most relevant categories for the POI, complete with probability scores for each. This tailored approach guides the model to provide meaningful classifications even with limited data, and these probabilities can be further utilized in other tasks, such as activity inference."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.3",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "III-C Activity Inference",
|
| 51 |
+
"text": "Activity inference involves considering many factors, such as the time of the day, the category of the visited location, and the functionality of the location, requiring detailed information. However, the privacy implications of the use of location-based services [22 ###reference_b22###], especially identifying specific POIs in the trajectory, are raising serious concerns. It could potentially disclose any specific user\u2019s daily activities, interests, and even personal habits. Thus, it is not appropriate to match the stay points in real trajectory data with specific POIs to conduct the inference. Therefore, an effective activity inference model must carefully balance the inclusion of detailed information with privacy safeguards. In the following parts, we elaborate a rule-based algorithm for mandatory activities inference and a probabilistic model for non-mandatory activities inference as shown in Fig. 3.\nMandatory Activity Inference:\n###figure_3### Our mandatory activity inference algorithm identifies three primary activities: Home, Work, and School, using a rule-based approach [18 ###reference_b18###]. The Home activity is determined by the stay point with the highest visit frequency during off-hours (7 pm to 8 am). Work activity is inferred by analyzing stay points not marked as Home, focusing on those with the highest visit frequency and travel distance from Home during typical work hours on weekdays (8 am to 7 pm). For Schools, stay points closest to \u2018education\u2019 POIs with the most travel from Home during weekdays\u2019 school hours are selected (8 am to 7 pm). This algorithm utilizes stay point data and proximity to relevant POIs to categorize mandatory activities effectively.\nNon-mandatory Activity Inference:\nIn contrast, non-mandatory activities, which vary more in periodicity and frequency, require a probabilistic approach for inference. We calculate the likelihood of activities at a stay point by considering the nearby POIs, the noise-adjusted radius, and the start time of the stay.\nThe probability of an activity type can be formulated as the equation below, given the surrounding POIs and the stay point start time :\nNext, we use Bayes\u2019 theorem to refine the probability given and :\nSubstituting this back into our initial equation, we obtain the final expression to calculate the probability of a certain activity :\nAssuming the start time and a specific is independence, becomes a constant value, and we can obtain:\nEach POI is assigned top three possible activity types and their probability by LLM. can be estimated from the 2017 National Household Travel Survey\n(NHTS) California Add-on dataset [23 ###reference_b23###]. The total conditional probability can be represented in a matrix (as in Table III ###reference_###), listing all possible probabilities for each POI and their corresponding activity types. The model then selects the highest probability among the possible POI and activity type combinations as the inferred activity result."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "IV Experiment",
|
| 57 |
+
"text": ""
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.1",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "IV-A Dataset",
|
| 63 |
+
"text": "To evaluate our approach across various contexts and assess its effectiveness, we use open-source data from Los Angeles (LA) County and Egypt, sourced from OSM. This data enables us to rigorously test the performance of our POI classification algorithm.\nThe dataset is structured as a directed, connected network with POIs, each associated with metadata tags found on the network\u2019s edges or vertices. Given that OSM is a free, open geographic database maintained by volunteers through open collaboration, the data exhibits significant diversity in terms of quality, format, and variety. This includes a range of location-specific details such as buildings, amenities, and infrastructure elements like roads and railways. While it offers higher information entropy to the LLM, it also complicates predictions due to ambiguous or contradictory labels. For instance, a church in OSM could be labeled as a place of worship, a tourist attraction, and an office building simultaneously. While this might seem contradictory at first glance, such a POI is applicable in all three contexts, whether an agent is going for worship, tourism, or work.\nDespite the crucial \u201cnames\u201d feature being present, 89% of the content in other features such as \u201camenity,\u201d \u201cbuilding type,\u201d and \u201cland use\u201d is missing in the Egypt dataset, and similarly, 91% is missing in the LA dataset. Utilizing datasets like OSM enables us to evaluate the resilience of our methodology in challenging conditions due to the inconsistent labeling style and incomplete data in the real-world scenario."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.2",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "IV-B Evaluation Setting",
|
| 69 |
+
"text": "Our study covers 15 activity categories [23 ###reference_b23###] to capture a wide range of daily activities, ensuring the dataset reflects the diversity of human behavior, as detailed in Table I ###reference_###.\n###table_1###"
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.2.1",
|
| 73 |
+
"parent_section_id": "4.2",
|
| 74 |
+
"section_name": "IV-B1 POI classification",
|
| 75 |
+
"text": "Evaluating the performance of our POI classification method presents challenges typical of an unsupervised learning setting, where no pre-existing ground truth is available. To address this, we randomly sample 500 observations from each POI dataset in both Egypt and LA County and create a manual ground truth. The accuracy of our method was then benchmarked against this human-labeled ground truth. To minimize labeling bias, five volunteers are enlisted to categorize the labels under uniform standards. Finally, we employ the language models \u2018gpt-3.5-turbo (gpt3.5)\u2019 and \u2018gpt-4 (gpt4)\u2019 from OpenAI, prompting them to identify the top three most relevant categories for each POI, along with the associated probabilities."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.2.2",
|
| 79 |
+
"parent_section_id": "4.2",
|
| 80 |
+
"section_name": "IV-B2 Activity Inference",
|
| 81 |
+
"text": "After completing the POI classification using the \u2018gpt-3.5\u2019 model, we obtain a dataset of 85,696 POIs, each annotated with possible activity categories, from LA County. This dataset serves as the input for the activity inference process, as illustrated in Fig. 1 ###reference_###.\nTo assess the efficacy of our proposed activity inference model, we curate a test trajectory dataset by extracting activities from the NHTS California Add-on dataset [23 ###reference_b23###], focusing on LA County. This dataset provides detailed daily travel trajectories, with each trajectory consisting of activities and a rough location at the zonal level. However, due to the lack of POIs associated with these activities, we utilize LLM for preliminary POI selection from the rough location zone, followed by manual curation to rectify any erroneous assignments. It is important to note the limitations of GPS-enabled smartphones, which typically provide accurate location data within a 4.9-meter radius under ideal conditions [24 ###reference_b24###]. However, accuracy diminishes in urban environments near structures like buildings, bridges, and dense foliage. To evaluate the model\u2019s performance and robustness against real-world conditions, we introduce noise at three levels: 5 meters, 10 meters, and 20 meters. The noise, simulated using a Gaussian kernel, is added to the latitude and longitude coordinates of the POIs. Subsequent to this augmentation, our test dataset comprises 362 individuals and 2,007 activities, consisting of 1,724 mandatory activities and 283 non-mandatory activities."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "4.3",
|
| 85 |
+
"parent_section_id": "4",
|
| 86 |
+
"section_name": "IV-C Result",
|
| 87 |
+
"text": ""
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "4.3.1",
|
| 91 |
+
"parent_section_id": "4.3",
|
| 92 |
+
"section_name": "IV-C1 POI Classification",
|
| 93 |
+
"text": "###figure_4### Considering there is a significant amount of incomplete features in the dataset. For observations that only have limited features available, such as a POI with only a name labeled like \u201cVolkswagen\u201d, it\u2019s hard to indicate whether the individual is there for car purchasing (category 5), auto maintenance (category 6), or is simply an employee at the location (category 2). As a result, in the calculation of overall accuracy and macro F-1 score, we consider the classification correct if one of the three predicted activities from the model matches the manually labeled activity category. Additionally, we calculate the hit rate for each position of the predicted activity code, recorded as \u201cHit@n\u201d. The model returns these codes in decreasing order of probability, as specified in the prompt.\n###table_2### As shown in Table II ###reference_###, our model achieves a 93.4% accuracy for the Egypt dataset and 91.4% accuracy for the LA county dataset, with F-1 scores of 96.10% and 92.9% respectively using the gpt4 model. This demonstrates its superiority and robustness compared with previous approaches.\nCompared to gpt3.5, the more advanced gpt4 model outperforms in both accuracy and F-1 score, especially for challenging datasets like LA country where more feature contents are missing. Besides, the analysis of the hit rate of the three predicted categories shows that while the \u201cHit @1\u201d rate dominates when using both models, the \u201cHit @1\u201d rate of gpt4 is approximately 10-13% higher than that of gpt3.5. This improvement underscores the advanced model\u2019s enhanced comprehension abilities, leading to more robust performance in classification.\nSome instances of the classification results are listed in Fig. 4 ###reference_###. For POI 1, with only the name \u201cKFC\u201d, the model confidently categorized category 7 (Buy meals) as its first match with a probability of 0.7, and the semantically related \u201cbuy\u201d category 5 (Buy goods) as the second choice. For POI 2 with a clear description, the model predicted category 10 (Exercise) as the best match with a 0.7 probability, and category 9 (Recreational activities) as the next likely category with a 0.2 probability. In the case of POI 3, a cafe, the model assigned close probabilities between categories 7 (Buy meals) and 9 (Recreational), since a cafe usually includes both the dining and recreational social attributes. For POI 4 and POI 5 whose names were documented in Arabic(\u201cRoman theater\u201d and \u201cDr. Rafiq Suleiman\u2019s clinic\u201d), the model successfully understood the meaning and labeled them with corresponding categories. Besides, 14 (Something else) was chosen as the complementary third choice in all cases above as there are no other related categories. These impressing instances again demonstrate the strong comprehension ability of our approach, offering logically predicted probabilities that enhance subsequent activity inference tasks."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "4.3.2",
|
| 97 |
+
"parent_section_id": "4.3",
|
| 98 |
+
"section_name": "IV-C2 Activity Inference",
|
| 99 |
+
"text": "In evaluating our activity inference model on the test dataset, we utilize accuracy @1, @2, and @3 under 5m, 10m, and 20m levels of noise as evaluation metrics. These metrics measure the model\u2019s performance in predicting activity types for stay points. Accuracy @1 represents the proportion of stay points where the correct activity type is the top prediction, while accuracy @2 and @3 indicate the percentages where the correct activity type falls within the top two and top three predictions, respectively.\nTable III ###reference_### provides examples of inference and demonstrate the probabilities of activity types and possible visiting POIs. In the example of clear inference, there are two possible POIs near the stay point in the trajectory data. By comprehensively considering the probability of categories and the time of the activity, the model is able to choose \u201dBuy goods\u201d as the possible activity category, which is consistent with the ground truth. Moreover, the probability of this correct category is significantly higher than in other cases. In contrast, in the example of ambiguous inference, the stay point is located in a commercial area mixed with many shopping and dining POIs. In such a scenario, the model finds \u201dBuy goods\u201d and \u201dBuy meals\u201d to be very close, making the situation confusing. The predicted probabilities of \u201dBuy goods\u201d and \u201dBuy meals\u201d are very close, leading to a prediction error. The model, even in ambiguous situations, can still make reasonable guesses, although in the case shown in the figure, the second choice is the most accurate inference. Overall, the model\u2019s inference can have strong distinguishability.\n###figure_5### An example of Clear Inference\n\nActivity for POI\n\n\n\n\nPOI 1 (3.2m)**\n \n\n\n0.424\n\n(Buy goods)\n\n \n\n\n0.167\n\n(Buy services)\n\n \n\n\n0.055\n\n(Something else)\n\n\nPOI 2 (4.8m)\n \n\n\n0.214\n\n(Buy meals)\n\n \n\n\n0.090\n\n(General errands)\n\n \n\n\n0.047\n\n(Something else)\n\n\nAn example of Ambiguous Inference\n\nActivity for POI\n\n\n\n\nPOI 1 (3.3m)\n \n\n\n0.170\n\n(Buy goods)\n\n \n\n\n0.084\n\n(Buy services)\n\n \n\n\n0.029\n\n(Something else)\n\n\nPOI 2 (3.4m)\n \n\n\n0.169\n\n(Buy meals)\n\n \n\n\n0.040\n\n(Buy goods)\n\n \n\n\n0.012\n\n(Recreation)\n\n\nPOI 3 (3.4m)\n \n\n\n0.182\n\n(Buy goods)\n\n \n\n\n0.052\n\n(Buy services)\n\n \n\n\n0.024\n\n(Recreation)\n\n\nPOI 4 (4.2m)\n \n\n\n0.208\n\n(Buy meals)\n\n \n\n\n0.024\n\n(Recreation)\n\n \n\n\n0\n\n(Buy goods)\n* The activity inference algorithm identifies the most likely visited POI within a specified range, accounting for noise and multiple POIs, to determine the final activity type. \n** Distance between the POI and the stay point.\nAccording to Table IV ###reference_###, which outlines the performance metrics for activity inference under varying levels of noise, the algorithm maintains a commendable degree of stability even as the standard deviation (SD) of noise increases. Specifically, for non-mandatory activities, Acc @3 remains above 80%, indicating a resilient performance against the perturbations caused by noise. With a 5-meter standard deviation in noise, the accuracy at Acc @3 is recorded at 88.4%, which diminishes to 84.2% when the noise level is increased to 20 meters. This reduction, while notable, is not drastic, supporting the assertion that the model upholds a stable predictive capability in the face of increasing noise levels. 
Particularly with respect to GPS trajectory datasets, the model demonstrates robust inference capabilities at the 5-meter noise level. These findings not only validate the stability of the model but also highlight its potential for effective deployment in real-world scenarios where varying degrees of GPS accuracy are a common occurrence.\n###table_3### As shown in Table V ###reference_###, the results indicate that mandatory activities (categories 1-3) exhibit consistent performance due to the deterministic nature of their patterns. On the other hand, activities characterized by more complex and variable patterns present greater challenges for prediction. Despite this, categories 4, 5, 7, 8, 12, and 13 demonstrate commendable accuracy. Notably, categories such as 5 (Buy goods), 12 (Health care), and 13 (Religious activities) show robust performance, which can be attributed to the ample instances available within the test dataset.\nConversely, activities like 11 (Visit friends), 14 (Something else), and 15 (Drop off/Pick up) prove to be more elusive in the context of stay point detection. These activities are less associated with consistent POIs and often fall into ambiguous situations such as the one shown in Table III ###reference_###, leading to relatively poorer performance. This suggests that a rule-based algorithm is effective for activities with clear-cut patterns.\n###table_4### Finally, the outcome of the proposed framework is the semantic trajectory, as illustrated in Fig. 5 ###reference_###, where each stay point in a trajectory is annotated with the most likely activity. For instance, as shown in Fig. 5 ###reference_### (a), individuals may engage in activities such as commuting between their home and work locations, exercising in the morning, or purchasing goods while en route from work to home. By augmenting stay point data with activity type information, we enhance our understanding of human travel mobility and behavior patterns. Through extensive trajectory mining, we progress from raw GPS data (aggregated to stay points) to refined activity chains, marking a significant advancement in the study of human travel behaviors."
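The Acc @k metric defined at the start of this subsection reduces to a few lines once each stay point has a ranked candidate list; a minimal sketch, assuming the candidate lists are ordered by decreasing score as produced by the inference step.

```python
def accuracy_at_k(ranked_predictions, labels, k):
    """Fraction of stay points whose true activity appears among the top-k candidates."""
    assert len(ranked_predictions) == len(labels)
    return sum(truth in candidates[:k]
               for candidates, truth in zip(ranked_predictions, labels)) / len(labels)

# acc1, acc2, acc3 = (accuracy_at_k(preds, truths, k) for k in (1, 2, 3))
```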
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "5",
|
| 103 |
+
"parent_section_id": null,
|
| 104 |
+
"section_name": "Conclusion and Future Work",
|
| 105 |
+
"text": "Our study has introduced a novel LLM-based framework for POI classification and activity inference, demonstrating notable improvements over traditional learning-based methods in handling incomplete and varied datasets like those from OpenStreetMap. Our approach, by leveraging the robust inferential capabilities of large language models, has shown exceptional performance with high accuracy and F-1 scores in real-world scenarios across diverse geographic locations. Looking ahead, we aim to enhance the accuracy and robustness of POI classification methods with more sophisticated prompt engineering or the development of a fine-tuned LLM designed specifically for the POI classification task."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "6",
|
| 109 |
+
"parent_section_id": null,
|
| 110 |
+
"section_name": "VI Acknowledgement",
|
| 111 |
+
"text": "We want to thank Xiangnan Zhang and Zhaoyi Ye for their contribution to dataset preprocessing."
|
| 112 |
+
}
|
| 113 |
+
],
|
| 114 |
+
"appendix": [],
|
| 115 |
+
"tables": {
|
| 116 |
+
"1": {
|
| 117 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S4.T1.2.1.1\" style=\"font-size:90%;\">TABLE I</span>: </span><span class=\"ltx_text\" id=\"S4.T1.3.2\" style=\"font-size:90%;\">Activity category code and their corresponding descriptions</span></figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S4.T1.4\">\n<tr class=\"ltx_tr\" id=\"S4.T1.4.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.4.1.1\">1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.1.2\">Home</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.1.3\">2</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.1.4\">Work</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.1.5\">3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.1.6\">School</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.4.2.1\">4</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.2.2\">Caregiving</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.2.3\">5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.2.4\">Buy goods</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.2.5\">6</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.2.6\">Buy services</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.4.3.1\">7</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.3.2\">Buy meals</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.3.3\">8</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.3.4\">General errands</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.3.5\">9</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.3.6\">Recreational</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.4.4.1\">10</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.4.2\">Exercise</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.4.3\">11</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.4.4\">Visit friends</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.4.5\">12</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.4.6\">Health care</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.4.5.1\">13</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T1.4.5.2\">Religious</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T1.4.5.3\">14</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T1.4.5.4\">Something else</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r 
ltx_border_t\" id=\"S4.T1.4.5.5\">15</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T1.4.5.6\">Drop off/Pick up</td>\n</tr>\n</table>\n</figure>",
|
| 118 |
+
"capture": "TABLE I: Activity category code and their corresponding descriptions"
|
| 119 |
+
},
|
| 120 |
+
"2": {
|
| 121 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S4.T2.2.1.1\" style=\"font-size:90%;\">TABLE II</span>: </span><span class=\"ltx_text\" id=\"S4.T2.3.2\" style=\"font-size:90%;\">Performance metrics for POI classification with different models.</span></figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S4.T2.4\">\n<tr class=\"ltx_tr\" id=\"S4.T2.4.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.4.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.1.1\">Metric</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.1.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.2.1\">Egypt(gpt3.5)</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.3.1\">Egypt(gpt4)</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.1.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.4.1\">LA(gpt3.5)</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.1.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.5.1\">LA(gpt4)</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.4.2.1\">Accuracy</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.2.2\">90.3%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.2.3\">93.4%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.2.4\">82.5%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.2.5\">91.4%</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r\" id=\"S4.T2.4.3.1\">Hit @1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.3.2\">61.6%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.3.3\">74.7%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.3.4\">65.2%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.3.5\">75.7%</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r\" id=\"S4.T2.4.4.1\">Hit @2</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.4.2\">22.6%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.4.3\">9.7%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.4.4\">15.4%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.4.5\">8.7%</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r\" id=\"S4.T2.4.5.1\">Hit @3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.5.2\">6.1%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.5.3\">9.0%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.5.4\">1.9%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.5.5\">7.0%</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.4.6.1\">F-1 Score</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" 
id=\"S4.T2.4.6.2\">91.50%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T2.4.6.3\">96.10%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T2.4.6.4\">82.1%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T2.4.6.5\">92.9%</td>\n</tr>\n</table>\n</figure>",
|
| 122 |
+
"capture": "TABLE II: Performance metrics for POI classification with different models."
|
| 123 |
+
},
|
| 124 |
+
"3": {
|
| 125 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T3\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S4.T3.8.1.1\" style=\"font-size:90%;\">TABLE III</span>: </span><span class=\"ltx_text\" id=\"S4.T3.9.2\" style=\"font-size:90%;\">Conditional Probability of POI-Activity Combination for a Stay Point*</span></figcaption><div class=\"ltx_flex_figure\">\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<p class=\"ltx_p ltx_figure_panel ltx_align_center\" id=\"S4.T3.6\"><span class=\"ltx_text\" id=\"S4.T3.6.6\">\n<span class=\"ltx_inline-block ltx_transformed_outer\" id=\"S4.T3.6.6.6\" style=\"width:366.1pt;height:320.4pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(0.0pt,0.0pt) scale(1,1) ;\">\n<span class=\"ltx_p\" id=\"S4.T3.6.6.6.6\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.7\">\n<span class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t ltx_colspan ltx_colspan_4\" id=\"S4.T3.6.6.6.6.6.6.7.1\">An example of Clear Inference</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.3.3.3.3.3.3.3\">\n<span class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T3.3.3.3.3.3.3.3.4\">Activity for POI</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.1.1.1.1.1.1.1.1\"></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.2.2.2.2.2.2.2.2\"></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.3.3.3.3.3.3.3.3\"></span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.8\">\n<span class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.8.1\">POI 1 (3.2m)**</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.8.2\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.8.2.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.8.2.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.8.2.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.8.2.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.8.2.2.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.6.6.6.6.6.6.8.2.2.1.1.1.1\">0.424</span></span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.8.2.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.8.2.2.1.2.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.6.6.6.6.6.6.8.2.2.1.2.1.1\">(Buy goods)</span></span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.8.2.3\"></span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.8.3\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.8.3.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.8.3.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.8.3.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.8.3.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.8.3.2.1.1.1\">0.167</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.8.3.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.8.3.2.1.2.1\">(Buy services)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.8.3.3\"></span></span>\n<span class=\"ltx_td 
ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.8.4\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.8.4.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.8.4.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.8.4.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.8.4.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.8.4.2.1.1.1\">0.055</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.8.4.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.8.4.2.1.2.1\">(Something else)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.8.4.3\"></span></span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.9\">\n<span class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.9.1\">POI 2 (4.8m)</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.9.2\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.9.2.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.9.2.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.9.2.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.9.2.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.9.2.2.1.1.1\">0.214</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.9.2.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.9.2.2.1.2.1\">(Buy meals)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.9.2.3\"></span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.9.3\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.9.3.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.9.3.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.9.3.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.9.3.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.9.3.2.1.1.1\">0.090</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.9.3.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.9.3.2.1.2.1\">(General errands)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.9.3.3\"></span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.9.4\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.9.4.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.9.4.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.9.4.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.9.4.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.9.4.2.1.1.1\">0.047</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.9.4.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.9.4.2.1.2.1\">(Something else)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.9.4.3\"></span></span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.10\">\n<span class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t ltx_colspan ltx_colspan_4\" id=\"S4.T3.6.6.6.6.6.6.10.1\">An example of Ambiguous Inference</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.6\">\n<span class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.6.4\">Activity for POI</span>\n<span class=\"ltx_td 
ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.4.4.4.4.4.4.4.1\"></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.5.5.5.5.5.5.5.2\"></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.6.3\"></span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.11\">\n<span class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.11.1\">POI 1 (3.3m)</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.11.2\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.11.2.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.11.2.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.11.2.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.11.2.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.11.2.2.1.1.1\">0.170</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.11.2.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.11.2.2.1.2.1\">(Buy goods)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.11.2.3\"></span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.11.3\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.11.3.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.11.3.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.11.3.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.11.3.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.11.3.2.1.1.1\">0.084</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.11.3.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.11.3.2.1.2.1\">(Buy services)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.11.3.3\"></span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.11.4\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.11.4.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.11.4.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.11.4.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.11.4.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.11.4.2.1.1.1\">0.029</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.11.4.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.11.4.2.1.2.1\">(Something else)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.11.4.3\"></span></span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.12\">\n<span class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.12.1\">POI 2 (3.4m)</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.12.2\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.12.2.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.12.2.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.12.2.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.12.2.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.12.2.2.1.1.1\">0.169</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.12.2.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.12.2.2.1.2.1\">(Buy 
meals)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.12.2.3\"></span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.12.3\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.12.3.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.12.3.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.12.3.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.12.3.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.12.3.2.1.1.1\">0.040</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.12.3.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.12.3.2.1.2.1\">(Buy goods)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.12.3.3\"></span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.12.4\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.12.4.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.12.4.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.12.4.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.12.4.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.12.4.2.1.1.1\">0.012</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.12.4.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.12.4.2.1.2.1\">(Recreation)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.12.4.3\"></span></span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.13\">\n<span class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.13.1\">POI 3 (3.4m)</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.13.2\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.13.2.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.13.2.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.13.2.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.13.2.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.13.2.2.1.1.1\">0.182</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.13.2.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.13.2.2.1.2.1\">(Buy goods)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.13.2.3\"></span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.13.3\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.13.3.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.13.3.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.13.3.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.13.3.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.13.3.2.1.1.1\">0.052</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.13.3.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.13.3.2.1.2.1\">(Buy services)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.13.3.3\"></span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.13.4\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.13.4.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.13.4.2\">\n<span class=\"ltx_tabular ltx_align_middle\" 
id=\"S4.T3.6.6.6.6.6.6.13.4.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.13.4.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.13.4.2.1.1.1\">0.024</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.13.4.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.13.4.2.1.2.1\">(Recreation)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.13.4.3\"></span></span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.14\">\n<span class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.14.1\">POI 4 (4.2m)</span>\n<span class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.14.2\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.14.2.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.14.2.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.14.2.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.14.2.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.14.2.2.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.6.6.6.6.6.6.14.2.2.1.1.1.1\">0.208</span></span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.14.2.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.14.2.2.1.2.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.6.6.6.6.6.6.14.2.2.1.2.1.1\">(Buy meals)</span></span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.14.2.3\"></span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.14.3\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.14.3.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.14.3.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.14.3.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.14.3.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.14.3.2.1.1.1\">0.024</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.14.3.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.14.3.2.1.2.1\">(Recreation)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.14.3.3\"></span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T3.6.6.6.6.6.6.14.4\"><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.14.4.1\"></span> <span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.14.4.2\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.6.6.6.6.6.6.14.4.2.1\">\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.14.4.2.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.14.4.2.1.1.1\">0</span></span>\n<span class=\"ltx_tr\" id=\"S4.T3.6.6.6.6.6.6.14.4.2.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T3.6.6.6.6.6.6.14.4.2.1.2.1\">(Buy goods)</span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T3.6.6.6.6.6.6.14.4.3\"></span></span></span>\n</span></span></span>\n</span></span></span></p>\n</div>\n<div class=\"ltx_flex_break\"></div>\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<p class=\"ltx_p ltx_figure_panel ltx_align_center\" id=\"S4.T3.10\"><span class=\"ltx_text\" id=\"S4.T3.10.1\" style=\"font-size:90%;\">* The activity inference algorithm identifies the most likely visited POI within a specified range, accounting for noise and multiple POIs, to determine the final 
activity type. \n<br class=\"ltx_break\"/>** Distance between the POI and the stay point.</span></p>\n</div>\n</div>\n</figure>",
|
| 126 |
+
"capture": "TABLE III: Conditional Probability of POI-Activity Combination for a Stay Point*"
|
| 127 |
+
},
|
| 128 |
+
"4": {
|
| 129 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T4\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S4.T4.2.1.1\" style=\"font-size:90%;\">TABLE IV</span>: </span><span class=\"ltx_text\" id=\"S4.T4.3.2\" style=\"font-size:90%;\">Performance metrics for non-mandatory activity inference under different levels of noise</span></figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S4.T4.4\">\n<tr class=\"ltx_tr\" id=\"S4.T4.4.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T4.4.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.4.1.1.1\">SD of noise</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.4.1.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.4.1.2.1\">Type Acc @1</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.4.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.4.1.3.1\">Type Acc @2</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.4.1.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.4.1.4.1\">Type Acc @3</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.4.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T4.4.2.1\">5m</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.4.2.2\">75.0%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.4.2.3\">84.1%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.4.2.4\">88.4%</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.4.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r\" id=\"S4.T4.4.3.1\">10m</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T4.4.3.2\">73.6%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T4.4.3.3\">81.8%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T4.4.3.4\">85.3%</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.4.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r\" id=\"S4.T4.4.4.1\">20m</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S4.T4.4.4.2\">71.9%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S4.T4.4.4.3\">80.5%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S4.T4.4.4.4\">84.2%</td>\n</tr>\n</table>\n</figure>",
|
| 130 |
+
"capture": "TABLE IV: Performance metrics for non-mandatory activity inference under different levels of noise"
|
| 131 |
+
},
|
| 132 |
+
"5": {
|
| 133 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T5\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S4.T5.2.1.1\" style=\"font-size:90%;\">TABLE V</span>: </span><span class=\"ltx_text\" id=\"S4.T5.3.2\" style=\"font-size:90%;\">Performance metrics for activity inference under 5m noise for different activity types</span></figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S4.T5.4\">\n<tr class=\"ltx_tr\" id=\"S4.T5.4.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T5.4.1.1\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.1.1.1\" style=\"font-size:90%;\">Types</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T5.4.1.2\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.1.2.1\" style=\"font-size:90%;\">Acc@1</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S4.T5.4.1.3\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.1.3.1\" style=\"font-size:90%;\">F1 score</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T5.4.1.4\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.1.4.1\" style=\"font-size:90%;\">Types</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T5.4.1.5\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.1.5.1\" style=\"font-size:90%;\">Acc@1</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T5.4.1.6\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.1.6.1\" style=\"font-size:90%;\">F1 score</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.4.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.4.2.1\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.2.1.1\" style=\"font-size:90%;\">1</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.4.2.2\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.2.2.1\" style=\"font-size:90%;\">98.3%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T5.4.2.3\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.2.3.1\" style=\"font-size:90%;\">98.3%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.4.2.4\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.2.4.1\" style=\"font-size:90%;\">9</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.4.2.5\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.2.5.1\" style=\"font-size:90%;\">63.1%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.4.2.6\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.2.6.1\" style=\"font-size:90%;\">74.2%</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.4.3\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.3.1\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.3.1.1\" style=\"font-size:90%;\">2</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.3.2\" 
style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.3.2.1\" style=\"font-size:90%;\">87.2%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T5.4.3.3\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.3.3.1\" style=\"font-size:90%;\">95.1%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.3.4\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.3.4.1\" style=\"font-size:90%;\">10</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.3.5\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.3.5.1\" style=\"font-size:90%;\">64.7%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.3.6\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.3.6.1\" style=\"font-size:90%;\">76.1%</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.4.4\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.1\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.4.1.1\" style=\"font-size:90%;\">3</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.2\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.4.2.1\" style=\"font-size:90%;\">90.0%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T5.4.4.3\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.4.3.1\" style=\"font-size:90%;\">95.0%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.4\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.4.4.1\" style=\"font-size:90%;\">11</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.5\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.4.5.1\" style=\"font-size:90%;\">52.2%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.6\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.4.6.1\" style=\"font-size:90%;\">67.3%</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.4.5\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.5.1\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.5.1.1\" style=\"font-size:90%;\">4</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.5.2\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.5.2.1\" style=\"font-size:90%;\">100%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T5.4.5.3\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.5.3.1\" style=\"font-size:90%;\">98.3%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.5.4\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.5.4.1\" style=\"font-size:90%;\">12</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.5.5\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.5.5.1\" style=\"font-size:90%;\">100%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.5.6\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.5.6.1\" style=\"font-size:90%;\">97.2%</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.4.6\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.6.1\" 
style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.6.1.1\" style=\"font-size:90%;\">5</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.6.2\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.6.2.1\" style=\"font-size:90%;\">78.5%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T5.4.6.3\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.6.3.1\" style=\"font-size:90%;\">76.3%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.6.4\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.6.4.1\" style=\"font-size:90%;\">13</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.6.5\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.6.5.1\" style=\"font-size:90%;\">100%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.6.6\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.6.6.1\" style=\"font-size:90%;\">100%</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.4.7\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.7.1\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.7.1.1\" style=\"font-size:90%;\">6</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.7.2\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.7.2.1\" style=\"font-size:90%;\">66.7%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T5.4.7.3\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.7.3.1\" style=\"font-size:90%;\">76.2%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.7.4\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.7.4.1\" style=\"font-size:90%;\">14</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.7.5\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.7.5.1\" style=\"font-size:90%;\">46.2%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.7.6\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.7.6.1\" style=\"font-size:90%;\">67.2%</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.4.8\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.8.1\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.8.1.1\" style=\"font-size:90%;\">7</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.8.2\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.8.2.1\" style=\"font-size:90%;\">77.4%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T5.4.8.3\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.8.3.1\" style=\"font-size:90%;\">87.1%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.8.4\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.8.4.1\" style=\"font-size:90%;\">15</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.8.5\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.8.5.1\" style=\"font-size:90%;\">50.1%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.8.6\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.8.6.1\" 
style=\"font-size:90%;\">70.6%</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.4.9\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T5.4.9.1\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.9.1.1\" style=\"font-size:90%;\">8</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T5.4.9.2\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.9.2.1\" style=\"font-size:90%;\">100%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S4.T5.4.9.3\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.9.3.1\" style=\"font-size:90%;\">96.6%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T5.4.9.4\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.9.4.1\" style=\"font-size:90%;\">Average</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T5.4.9.5\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.9.5.1\" style=\"font-size:90%;\">91.7%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T5.4.9.6\" style=\"padding-left:2.0pt;padding-right:2.0pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.9.6.1\" style=\"font-size:90%;\">92.3%</span></td>\n</tr>\n</table>\n</figure>",
|
| 134 |
+
"capture": "TABLE V: Performance metrics for activity inference under 5m noise for different activity types"
|
| 135 |
+
}
|
| 136 |
+
},
|
| 137 |
+
"image_paths": {
|
| 138 |
+
"1": {
|
| 139 |
+
"figure_path": "2405.11715v2_figure_1.png",
|
| 140 |
+
"caption": "Figure 1: Architecture of the proposed framework from POI classification (left) to activity Inference (right).",
|
| 141 |
+
"url": "http://arxiv.org/html/2405.11715v2/extracted/5800968/figures/flow_diagram_new.png"
|
| 142 |
+
},
|
| 143 |
+
"2": {
|
| 144 |
+
"figure_path": "2405.11715v2_figure_2.png",
|
| 145 |
+
"caption": "Figure 2: Example of the prompt and output from LLM.",
|
| 146 |
+
"url": "http://arxiv.org/html/2405.11715v2/extracted/5800968/figures/Architecture.png"
|
| 147 |
+
},
|
| 148 |
+
"3": {
|
| 149 |
+
"figure_path": "2405.11715v2_figure_3.png",
|
| 150 |
+
"caption": "Figure 3: Flow chart of activity inference procedure.",
|
| 151 |
+
"url": "http://arxiv.org/html/2405.11715v2/extracted/5800968/figures/flow_diag_inference.png"
|
| 152 |
+
},
|
| 153 |
+
"4": {
|
| 154 |
+
"figure_path": "2405.11715v2_figure_4.png",
|
| 155 |
+
"caption": "Figure 4: Example of POIs and classification results from (a) LA county and (b) Egypt.",
|
| 156 |
+
"url": "http://arxiv.org/html/2405.11715v2/extracted/5800968/figures/poi_example_la_egypt.png"
|
| 157 |
+
},
|
| 158 |
+
"5": {
|
| 159 |
+
"figure_path": "2405.11715v2_figure_5.png",
|
| 160 |
+
"caption": "Figure 5: Examples of semantic trajectories post-data mining: (a) Activity-annotated stay points in LA. (b) Activity timelines.",
|
| 161 |
+
"url": "http://arxiv.org/html/2405.11715v2/extracted/5800968/figures/TrajectoryXL.png"
|
| 162 |
+
}
|
| 163 |
+
},
|
| 164 |
+
"validation": true,
|
| 165 |
+
"references": [],
|
| 166 |
+
"url": "http://arxiv.org/html/2405.11715v2"
|
| 167 |
+
}
|
20240819/2405.12535v2.json
ADDED
|
@@ -0,0 +1,485 @@
| 1 |
+
{
|
| 2 |
+
"title": "PhiBE: A PDE-based Bellman Equation for Continuous Time Policy Evaluation",
|
| 3 |
+
"abstract": "In this paper, we address the problem of continuous-time reinforcement learning in scenarios where the dynamics follow a stochastic differential equation. When the underlying dynamics remain unknown and we have access only to discrete-time information, how can we effectively conduct policy evaluation? We first highlight that the commonly used Bellman equation (BE) is not always a reliable approximation to the true value function. We then introduce a new bellman equation, PhiBE, which integrates the discrete-time information into a PDE formulation. The new bellman equation offers a more accurate approximation to the true value function, especially in scenarios where the underlying dynamics change slowly. Moreover, we extend PhiBE to higher orders, providing increasingly accurate approximations. We conduct the error analysis for both BE and PhiBE with explicit dependence on the discounted coefficient, the reward and the dynamics. Additionally, we present a model-free algorithm to solve PhiBE when only discrete-time trajectory data is available. Numerical experiments are provided to validate the theoretical guarantees we propose.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Reinforcement learning (RL) [23 ###reference_b23###] has achieved significant success in applications in discrete-time decision-making process. Remarkable milestones include its applications in Atari Games [16 ###reference_b16###], AlphaGO [21 ###reference_b21###], and ChatGPT [30 ###reference_b30###, 18 ###reference_b18###], demonstrating capabilities similar to human intelligence. In all these applications, there is no concept of time, where state transitions occur only after actions are taken. However, in most applications in the physical world, such as autonomous driving [3 ###reference_b3###, 14 ###reference_b14###] and robotics [12 ###reference_b12###], state changes continuously over time regardless of whether actions are discrete or not. On the other hand, the data are always collected in discrete time. The mismatch between the continuous-time dynamics and discrete-time data makes continuous-time RL more challenging. This paper directs its focus toward addressing continuous-time RL problems that can be equivalently viewed as a stochastic optimal control problem with unknown dynamics [27 ###reference_b27###, 6 ###reference_b6###, 1 ###reference_b1###, 9 ###reference_b9###, 10 ###reference_b10###]. Since one can divide the RL problem into policy evaluation and policy update [24 ###reference_b24###, 13 ###reference_b13###, 25 ###reference_b25###, 9 ###reference_b9###], we first concentrate on the continuous-time policy evaluation (PE) problem in this paper. In particular, we focus on how to approximate the continuous-time PE problem with discrete-time information.\nGiven discrete-time trajectory data generated from the underlying dynamics, a common approach to address the continuous-time PE problem involves discretizing time and treating it as a Markov reward process. This method yields an approximated value function satisfying a Bellman equation, thereby one can use RL algorithms such as Temporal difference [23 ###reference_b23###], gradient TD [20 ###reference_b20###], Least square TD [4 ###reference_b4###] to solve the Bellman equation. However, this paper shows that the Bellman equation is not always a good tool for approximating the continuous-time value function. We show that the solution to the Bellman equation is sensitive to time discretization, the change rate of the rewards and the discount coefficient as shown in Figure 1 ###reference_### (See Section 5.1 ###reference_### for the details of Figure 1 ###reference_###.) Hence, the ineffectiveness of RL algorithms for continuous-time RL doesn\u2019t stem from data stochasticity or insufficient sampling points; rather, it fundamentally arises from the failure of the Bellman equation as an approximation of the true value function. As shown in Figure 1 ###reference_###, the RL algorithms are approximating the solution to the Bellman equation instead of the true value function.\nThe central question we aim to address in this paper is whether, with the same discrete-time information and the same computational cost, one can approximate the true solution more accurately than the Bellman equation.\nWe proposed a PDE-based Bellman equation, called PhiBE. which integrates discrete-time information into a continuous PDE. This approach yields a more accurate approximation of the exact solution compared to the traditional Bellman equation, particularly when the acceleration of the dynamics is small.\nWhen equipped with discrete-time transition distribution, PhiBE is a second-order PDE that contains discrete-time information. 
The core concept revolves around utilizing discrete-time data to approximate the dynamics rather than the value function. Furthermore, we extend this framework to higher-order PhiBE, which enhances the approximation of the true value solution with respect to the time discretization. There are two potential benefits that arise from PhiBE. Firstly, PhiBE is more robust to various reward functions compared to BE, which allows greater flexibility in designing reward functions to effiectively achieve RL objectives. Secondly, it achieves comparable error to BE with sparser data collection, enhancing the efficiency of RL algorithms.\nAs illustrated in Figure 1 ###reference_###, when provided with the same discrete-time information, the exact solution derived from PhiBE is closer to the true value function than BE. Additionally, we introduce a model-free algorithm for approximating the solution to PhiBE when only discrete-time data are accessible. As depicted in Figure 1 ###reference_###, with exactly the same data and the same computational cost, the proposed algorithm outperforms the RL algorithms drastically.\n###figure_1### ###figure_2### ###figure_3###"
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Setting",
|
| 15 |
+
"text": "Consider the following continuous-time PE problem, where the value function , defined as follows, is the expected discounted cumulative reward starting from ,\nHere is a discounted coefficient, is a reward function, and the state is driven by the stochastic differential equation (SDE),\nwith unknown drift function and unknown diffusion function .\nIn this paper, we assume that are Lipschitz continuous and the reward function is bounded. This ensures that (2 ###reference_###) has a unique strong solution [17 ###reference_b17###] and the infinite horizon integral is bounded.\nWe aim to approximate the continuous-time value function when only discrete-time information is available. To be more specific, we consider the following two cases:\nThe transition distribution in discrete time , driven by the continuous dynamics (2 ###reference_###), is given. Here represents the probability density function of given .\nTrajectory data generated by the continuous dynamics (2 ###reference_###) and collected at discrete time is given. Here the trajectory data contains independent trajectories, and the initial state of each trajectory are sampled from a distribution .\nWhen the discrete transition distribution is given (Case 1), one can explicitly formulate the Bellman equation. One can also estimate the discrete transition distribution from the trajectory data, which is known as model-based RL. The error analysis in Section 3 ###reference_### is conducted under Case 1. We demonstrate that the Bellman equation is not always the optimal equation to solve continuous-time reinforcement learning problems even when the discrete-time transition dynamics are known, and consequently, all the RL algorithms derived from it are not optimal either. To address this, we introduce a Physics-informed Bellman equation (PhiBE) and establish that its exact solution serves as a superior approximation to the true value function compared to the classical Bellman equation. When only trajectory data is available (Case 2), one can also use the data to solve the PhiBE, referred to as model-free RL, which will be discussed in Section 4 ###reference_###."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "A PDE-based Bellman Equation (PhiBE)",
|
| 21 |
+
"text": "In Section 3.1 ###reference_###, we first introduce the Bellman equation, followed by an error analysis to demonstrate why it is not always a good approximation. Then, in Section 3.2 ###reference_###, we propose the PhiBE, a PDE-based Bellman equation, considering both the deterministic case (Section 3.2.1 ###reference_.SSS1###) and the stochastic case (Section 3.2.2 ###reference_.SSS2###). The error analysis provides guidance on when PhiBE is a better approximation than the BE."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Bellman equation",
|
| 27 |
+
"text": "By approximating the definition of the value function (1 ###reference_###) in discrete time, one obtains the approximated value function,\nIn this way, it can be viewed as a policy evaluation problem in Markov Decision Process, where the state is , the reward is , and the discount factor is and the transition dynamics is . Therefore, the approximated value function satisfies the following Bellman equation [23 ###reference_b23###].\nWhen the discrete-time transition distribution is not given, one can utilize various RL algorithms to solve the Bellman equation (3 ###reference_###) using the trajectory data. However, if the exact solution to the Bellman equation is not a good approximation to the true value function, then all the RL algorithms derived from it will not effectively approximate the true value function. In the theorem below, we provide an upper bound for the distance between the solution to the above BE and the true value function defined in (1 ###reference_###).\nAssume that are bounded, then the solution to the BE (3 ###reference_###) approximates the true value function defined in (1 ###reference_###) with an error\nwhere\nwith , and .\nOne sufficient condition for the assumption to hold is that , for are all bounded. However, is less restrictive than the above and allows, for example, linear dynamics , with the derivative of the reward decreasing faster than a linear function at infinity, .\nThe proof of the theorem is given in Section 6.1 ###reference_###. In fact, by expressing the true value function as the sum of two integrals, one can more clearly tell where the error in the BE comes from. Note that , as defined in (1 ###reference_###), can be equivalently written as,\nOne can interpret the Bellman equation defined in (3 ###reference_###) as an equation resulting from approximating in (5 ###reference_###) by . The error between these two terms can be bounded by:\ncharacterizes the error of in Theorem 3.1 ###reference_theorem1###.\nTheorem 3.1 ###reference_theorem1### indicates that the solution to the Bellman equation (3 ###reference_###) approximates the true value function with a first-order error of . Moreover, the coefficient before suggests that for the same time discretization , when is small, the error is dominated by the term , indicating that the error increases when the reward changes rapidly. Conversely, when is large, the error is mainly affected by , implying that the error increases when the magnitude of the reward is large.\nThe question that the rest of this section seeks to address is whether, given the same discrete-time information, i.e., the transition distribution , time discretization , and discount coefficient , one can achieve a more accurate estimation of the value function compared to the Bellman equation ."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "A PDE-based Bellman equation",
|
| 33 |
+
"text": "In this section, we introduce a PDE-based Bellman equation, referred to as PhiBE. We begin by discussing the case of deterministic dynamics in Section 3.2.1 ###reference_.SSS1### to illustrate the idea clearly. Subsequently, we extend our discussion to the stochastic case in Section 3.2.2 ###reference_.SSS2###."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.2.1",
|
| 37 |
+
"parent_section_id": "3.2",
|
| 38 |
+
"section_name": "3.2.1 Deterministic Dynamics",
|
| 39 |
+
"text": "When in (2 ###reference_###), the dynamics become deterministic, which can be described by the following ODE,\nIf the discrete-time transition dynamics is given, where provides the state at time when the state at time is , then the BE in deterministic dynamics reads as follows,\nThe key idea of the new equation is that, instead of approximating the value function directly, one approximates the dynamics. First note that the value function defined in (1 ###reference_###) can be equivalently written as,\nwhich implies that,\nUsing the chain rule on the LHS of the above equation yields , and the RHS can be written as , resulting in a PDE for the true value function\nor equivalently,\nApplying a finite difference scheme, one can approximate by\nand substituting it back into (8 ###reference_###) yields\nAlternatively, this equation can be expressed in the form of a PDE as follows,\nNote that the error now arises from\nwhich only depends on the dynamics. As long as the dynamics change slowly, i.e., the acceleration of dynamics is small, the error diminishes.\nWe refer to (10 ###reference_###) as PhiBE for deterministic dynamics, an abbreviation for the physics-informed Bellman equation, because it incorporates both the current state and the state after , similar to the Bellman equation, while also resembling the form of the PDE (9 ###reference_###) derived from the true continuous-time physical environment. However, unlike the true PDE (9 ###reference_###) and the Bellman equation, where one only possesses continuous information and the other only discrete information, PhiBE combines both continuous PDE form and discrete transition information .\nOne can derive a higher-order PhiBE by employing a higher-order finite difference scheme to approximate . For instance, the second-order finite difference scheme\nresulting in the second-order PhiBE,\nIn this approximation, has a second order error . We summarize -th order PhiBE in deterministic dynamics in the following Definition.\nWhen the underlying dynamics are deterministic, then the -th order PhiBE is defined as,\nwhere\nand\nNote that can be equivalently written as\nThere is an equivalent definition of , given by\nNote that this method differs from the finite difference scheme. In the classical finite difference scheme, the dynamics is known, and the numerical scheme is used to approximate the trajectory . However, it is the opposite here. While the dynamics is unknown, the trajectory is used to approximate the dynamics.\nConsequently, the technique used to demonstrate the convergence and convergence rate of is also distinct from classical numerical analysis. The error analysis of PhiBE in the deterministic dynamics is established in the following theorem.\nAssume that are bounded. Additionally, assume that , then the solution to the PhiBE (11 ###reference_###) is an ith-order approximation to the true value function defined in (1 ###reference_###) with an error\nwhere\nand is a constant defined in (36 ###reference_###) that only depends on the order .\nSee Section 6.2 ###reference_### for the proof of Theorem 3.2 ###reference_theorem2###. Note that the boundedness assumption of is required in general to establish that is an -th order approximation to in Lemma 6.2 ###reference_theorem2###. 
However, we will see later in Remark 5 ###reference_ark5### and Theorem 3.3 ###reference_theorem3### that one can relax this assumption in linear dynamics.\nBy Theorem 3.2 ###reference_theorem2###, the distance between the first-order PhiBE solution and the true value function can be bounded by\nComparing it with the difference between the BE solution and the true value function in deterministic dynamics,\none observes that when the change of the reward is rapid, i.e., is large, but the change in velocity is slow, i.e., is small, even though both and are first-order approximations to the true value function, has a smaller upper bound.\nThe advantage of the higher-order PhiBE is two-fold. Firstly, it provides a higher-order approximation, enhancing accuracy compared to the first-order PhiBE or BE. Secondly, as demonstrated in Theorem 3.2 ###reference_theorem2###, the approximation error of the -th order PhiBE decreases as decreases. if the \u201cacceleration\u201d, i.e., , of the dynamics is large but the change in acceleration, i.e., , is slow, then the error reduction with the second-order PhiBE will be even more pronounced in addition to the higher-order error effect.\nA sufficient condition for being bounded is that are bounded for all . Note that the linear dynamics does not satisfy this condition. We lose some sharpness for the upper bound to make the theorem work for all general dynamics. However, we prove in Theorem 3.3 ###reference_theorem3### that PhiBE works when , and one can derive a sharper error estimate for this case.\nAdditionally, when the underlying dynamics are linear, one can conduct a sharper error analysis for PhiBE.\nWhen the underlying dynamics follows\n,\nthen the solution to the -th order PhiBE in deterministic dynamics approximates the true value function with an error\nwhere is a constant defined in (36 ###reference_###) that only depends on the order .\nThe proof of the above theorem is provided in Section 6.3 ###reference_###. We also establish the upper bound for the BE in the same dynamics, and it turns out that the upper bound in Theorem 3.1 ###reference_theorem1### is already sharp. According to Theorem 3.3 ###reference_theorem3###, the error of the -th order PhiBE for linear dynamics depends on . This means that even if the dynamics changes exponentially fast when , as long as , PhiBE solution is still a good approximation to the true value function. Especially when , higher order PhiBE gives a smaller approximation error in addition to the higher-order effect."
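The first- and second-order drift approximations described above are the standard forward finite-difference stencils applied to the flow map; a small sketch follows, where the names and the linear-dynamics check are illustrative assumptions, and the general i-th order coefficients of (13) are not reproduced.

```python
import numpy as np

def drift_estimate(s, flow, dt, order=1):
    """Approximate the unknown drift from the discrete-time flow map
    flow(s, t) (the state at time t starting from s):
      order 1: (flow(s, dt) - s) / dt
      order 2: (-3*s + 4*flow(s, dt) - flow(s, 2*dt)) / (2*dt)
    These are the standard forward finite-difference stencils."""
    if order == 1:
        return (flow(s, dt) - s) / dt
    if order == 2:
        return (-3.0 * s + 4.0 * flow(s, dt) - flow(s, 2.0 * dt)) / (2.0 * dt)
    raise ValueError("only orders 1 and 2 are sketched here")

# Hypothetical check on linear dynamics ds/dt = lam*s, flow(s, t) = s*exp(lam*t)
lam, dt = 0.1, 0.5
s = np.linspace(-1.0, 1.0, 5)
b_hat = drift_estimate(s, lambda s, t: s * np.exp(lam * t), dt, order=2)
# b_hat approximates the true drift lam * s with an O(dt^2) error
```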
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.2.2",
|
| 43 |
+
"parent_section_id": "3.2",
|
| 44 |
+
"section_name": "3.2.2 Stochastic dynamics",
|
| 45 |
+
"text": "When is a non-degenerate matrix, then the dynamics is stochastic and driven by the SDE in (2 ###reference_###). By Feynman\u2013Kac theorem [22 ###reference_b22###], the value function satisfies the following PDE,\nwhere is an operator defined in (4 ###reference_###). However, one cannot directly solve the PDE (16 ###reference_###) as are unknown. In the case where one only has access to the discrete-time transition distribution , we propose an -th order PhiBE in the stochastic dynamics to approximate the true value function .\nWhen the underlying dynamics are stochastic, then the -th order PhiBE is defined as,\nwhere\nwhere is defined in (4 ###reference_###), and is defined in (13 ###reference_###).\nThere is another -th order approximation for ,\nHowever, the unbiased estimate for\nrequires two independent samples starting from , which are usually unavailable in the RL setting. This is known as the \u201cDouble Sampling\u201d problem. One could apply a similar idea in [29 ###reference_b29###, 28 ###reference_b28###] to alleviate the double sampling problem when the underlying dynamics are smooth, that is, approximating . However, it will introduce additional bias into the approximation. We leave the study of this approximation or the application of BFF on for future research.\nThe first and second-order approximations are presented as follows. The first-order approximation reads,\nand the second-order approximation reads,\nNext, we show the solution to the th-order PhiBE provides a th-order approximation to the true value function . To establish the error analysis, the following assumptions are required.\nAssumptions on the dynamics:\nfor .\nare bounded for .\nThe first assumption ensures the coercivity of the operator , which is necessary to establish the regularity of . The second assumption is employed to demonstrate that and are -th approximations to , respectively. Additionally, under the above assumption, there exists a stationary distribution to the stochastic dynamics that satisfies [7 ###reference_b7###],\nWe define a weighted norm under the above stationary distribution,\nFurthermore, one can bound\nby Theorem 1.1 of [2 ###reference_b2###].\nFirst, the error analysis for BE in the weighted norm is presented in the following theorem.\nAssume that are bounded, then the solution to the BE (3 ###reference_###) approximates the true value function defined in (1 ###reference_###) with an error\nThe proof of Theorem 3.4 ###reference_theorem4### is given in Section 6.4 ###reference_###. Next, the error analysis for PhiBE in stochastic dynamics is presented in the following theorem.\nUnder Assumption 1 ###reference_umption1###, and , the solution to the i-th order PhiBE (17 ###reference_###) is an i-th order approximation to the true value function defined in (1 ###reference_###) with an error\nwhere are constants defined in (42 ###reference_###), (43 ###reference_###) depending on .\nThe proof of Theorem 3.5 ###reference_theorem5### is given in Section 6.5 ###reference_###.\nHere we discuss two cases. The first case is when the diffusion is known, that is, , then the distance between the PhiBE and the true value function can be bounded by\nSimilar to the deterministic case, the error of the -th order PhiBE proportional to the change rate of the dynamics . 
One can refer to Remarks 3 ###reference_ark3### and 4 ###reference_ark4### for the benefit of the 1st-order PhiBE and higher-order PhiBE with respect to different dynamics.\nThe second case is when both drift and diffusion are unknown. Then the distance between the first-order PhiBE and the true value function can be bounded by\nwhere\nHere the operator represents . This indicates that when is large or are small, the difference between and is smaller. Comparing it with the upper bound for the BE, which is more sensitive to and reward function, the PhiBE approximation is less sensitive to these factors. When the change in the dynamics is slow, or the noise is large, even the first-order PhiBE solution is a better approximation to the true value function."
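Assuming paired one-step transition samples from a fixed state, the first-order estimates of the drift and of the squared diffusion on which (18) is built can be sketched through empirical moments; the array shapes and names below are my assumptions, not the paper's notation.

```python
import numpy as np

def moment_estimates(s, s_next, dt):
    """First-order estimates of drift and squared diffusion at a state s
    from n one-step transition samples s -> s_next collected dt apart:
        b_hat(s)     ~ mean(s_next - s) / dt
        Sigma_hat(s) ~ mean((s_next - s)(s_next - s)^T) / dt
    Shapes: s is (d,), s_next is (n, d)."""
    incr = s_next - s                                  # (n, d) increments
    b_hat = incr.mean(axis=0) / dt
    Sigma_hat = (incr[:, :, None] * incr[:, None, :]).mean(axis=0) / dt
    return b_hat, Sigma_hat
```

Because the increment appears linearly in the drift estimate and quadratically only through the same sample, no second independent sample from the same state is needed, which is the point of avoiding the double-sampling problem mentioned above.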
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Model-free Algorithm for continuous-time Policy Eveluation",
|
| 51 |
+
"text": "In this section, we assume that one only has access to the discrete-time trajectory data . We first revisit the Galerkin method for solving PDEs with known dynamics in Section 4.1 ###reference_###, and we provide the error analysis of the Galerkin method for PhiBE. Subsequently, we introduce a model-free Galerkin method in Section 4.2 ###reference_###."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.1",
|
| 55 |
+
"parent_section_id": "4",
|
| 56 |
+
"section_name": "Galerkin Method",
|
| 57 |
+
"text": "Given bases , the objective is to find an approximation to the solution of the PDE,\nwhere , and is defined in (4 ###reference_###). The Galerkin method involves inserting the ansatz into the PDE and then projecting it onto the finite bases,\nwhich results in a linear system of ,\nWhen the dynamics are known, one can explicitly compute the matrix and the vector , and find the parameter accordingly.\nIn continuous-time policy evaluation problems, one does not have access to the underlying dynamics . However, the approximated dynamics is given through PhiBE. Therefore, if one has access to the discrete-time transition distribution, then the parameter can be solved for by approximating with\nwhere are defined in (18 ###reference_###).\nWe give the error estimate of the Galerkin method for PhiBE in the following theorem.\nThe Galerkin solution satisfies\nWhen is bounded for the stationary distribution , then as long as , the Galerkin solution approximates the true value function defined in (1 ###reference_###) with an error\nWhen is unbounded, assume that the bases is bounded, then as long as , the Galerkin solution approximates the true value function defined in (1 ###reference_###) with an error\nwhere are constants depending on defined in (61 ###reference_###), (62 ###reference_###), (65 ###reference_###), (66 ###reference_###), (42 ###reference_###) respectively, and .\nThe proof of the Theorem is given in Section 6.6 ###reference_###."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.2",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "Model-free Galerkin method for PhiBE",
|
| 63 |
+
"text": "When only discrete-time trajectory data is available, we first develop an unbiased estimate for from the trajectory data,\nwith defined in (13 ###reference_###).\nThen, using the above unbiased estimate, one can approximate the matrix and the vector by\nBy solving the linear system , one obtains the approximated value function in terms of the finite bases. Note that our algorithm can also be applied to stochastic rewards or even unknown rewards as only observation of rewards is required at discrete time. We summarize the model-free Galerkin method for deterministic and stochastic dynamics in Algorithm 1 ###reference_### and Algorithm 2 ###reference_###, respectively.\nGiven: discrete time step , discount coefficient , discrete-time trajectory data generated from the underlying dynamics, and a finite bases .\nGiven: discrete time step , discount coefficient , discrete-time trajectory data generated from the underlying dynamics, and a finite bases ."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "5",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "Numerical experiments",
|
| 69 |
+
"text": ""
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "5.1",
|
| 73 |
+
"parent_section_id": "5",
|
| 74 |
+
"section_name": "Deterministic dynamics",
|
| 75 |
+
"text": "We first consider deterministic dynamics, where the state space is defined as . We consider two kinds of underlying dynamics, one is linear,\nand the other is nonlinear,\nThe reward is set to be for the linear case and for the nonlinear case, where the value function can be exactly obtained, in both cases. We use periodic bases with large enough so that the solution can be accurately represented by these finite bases.\nFor the linear dynamics, the discrete-time transition dynamics are\nHence, one can express the BE as\nand -th order PhiBE as\nrespectively for defined in (13 ###reference_###).\nFor the nonlinear dynamics, we approximate and generate the trajectory data numerically,\nwith sufficiently small.\nThe trajectory data are generated from different initial values Unif, and each trajectory has data points, . Algorithm 1 ###reference_### is used to solve for the PhiBE, and LSTD is used to solve for BE. LSTD is similar to Algorithm 1 ###reference_### except that one uses derived from the BE (3 ###reference_###),\ninstead of .\nIn Figure 1 ###reference_###, the data are generated from the linear dynamics (23 ###reference_###) with and collected at different . We compare the solution to the second-order PhiBE with the solution to BE (when the discrete-time transition dynamics are known), and the performance of LSTD with the proposed Algorithm 1 ###reference_### (when only trajectory data are available) with different data collection interval , discount coefficient and oscillation of reward . Note that the exact solution to BE is computed as with large enough, and the exact solution to PhiBE is calculated by applying the Galerkin method to (26 ###reference_###).\nIn Figure 2 ###reference_###, the data are generated from the nonlinear dynamics (24 ###reference_###) and collected at different . We compare the solutions to the first-order and second-order PhiBE with the solution to the BE (when the discrete-time transition dynamics are known), and the performance of LSTD with the proposed Algorithm 1 ###reference_### (when only trajectory data are available) with different .\nIn Figure 3 ###reference_###, the distances of the solution from PhiBE, BE to the true value function are plotted as ; the distances of the approximated solution by Algorithm 1 ###reference_### and LSTD to the true value function are plotted as the amount of data increases. Here, the distance is measured using the norm\nIn Figures 1 ###reference_### and 2 ###reference_###, when the discrete-time transition dynamics are known, PhiBE solution is much closer to the true value function compared to the BE solution in all the experiments. Especially, the second-order PhiBE solution is almost identical to the exact value function. Additionally, when only trajectory data is available, one can approximate the solutions to PhiBE very well with only or data points. Particularly, when is large, the solution to PhiBE still approximates the true solution very well, which indicates that one can collect data sparsely based on PhiBE. Moreover, the solution to PhiBE is not sensitive to the oscillation of the reward function, which implies that one has more flexibility in designing the reward function in the RL problem. Besides, unlike BE, the error increases when is too small or too large, while the error for PhiBE decays as increases. 
Furthermore, it\u2019s noteworthy that in Figure 2 ###reference_###/(b) and (c), for relatively large changes in the dynamics indicated by and , respectively, PhiBE still provides a good approximation.\nIn Figure 3 ###reference_###/(a) and (b), one can observe that the solution for BE approximates the true solution in the first order, while the solution for -th order PhiBE approximates the true solution in -th order.\nIn Figure 3 ###reference_###/(c) and (d), one can see that as the amount of data increases, the error from the LSTD algorithm stops decreasing when it reaches . This is because the error between BE and the true value function dominates the data error. On the other hand, for higher-order PhiBE, as the amount of data increases, the performance of the algorithm improves, and the error can achieve .\n###figure_4### ###figure_5### ###figure_6### ###figure_7### ###figure_8### ###figure_9### ###figure_10###"
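For reference, a sketch of how trajectory data for the nonlinear dynamics can be generated numerically with a sufficiently small internal step, as described above; the integrator, the names, and the example right-hand side are my choices, not the authors'.

```python
import numpy as np

def ode_data(f, s0, dt, n_obs, substeps=1000):
    """Integrate ds/dt = f(s) with a fine internal Euler step dt/substeps
    and record the state every dt units of time."""
    h = dt / substeps
    traj = np.empty(n_obs + 1)
    s = float(s0)
    traj[0] = s
    for k in range(1, n_obs + 1):
        for _ in range(substeps):
            s += h * f(s)
        traj[k] = s
    return traj

# Hypothetical nonlinear example: f(s) = -sin(s), initial state s0 = 1.0
traj = ode_data(lambda s: -np.sin(s), s0=1.0, dt=5.0, n_obs=20)
```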
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "5.2",
|
| 79 |
+
"parent_section_id": "5",
|
| 80 |
+
"section_name": "Stochastic dynamics",
|
| 81 |
+
"text": "We consider the Ornstein\u2013Uhlenbeck process,\nwith . Here the reward is set to be , where the value function can be exactly obtained,\n. For OU process, since the conditional density function for given follows the normal distribution with expectation , variance . Both PhiBE and BE have explicit forms. One can express PhiBE as,\nand BE as,\nwhere\nIn Figure 4 ###reference_###, we compare the exact solution and approximated solution to PhiBE and BE, respectively, for different . In Figure 5 ###reference_###/(a), the decay of the error as for the exact solutions to PhiBE and BE are plotted. In Figure 5 ###reference_### /(b), the decay of the approximated solution to PhiBE and BE based on Algorithm 2 ###reference_### and LSTD are plotted with an increasing amount of data.\nWe observe similar performance in the stochastic dynamics as in the deterministic dynamics, as shown in Figures 4 ###reference_### and 5 ###reference_###. In Figure 5 ###reference_###, the variance of the higher order PhiBE is larger than that of the first-order PhiBE because it involves more future steps. However, note that the error is plotted on a logarithmic scale. Therefore, when the error is smaller, although the variance appears to have the same width on the plot, it is actually much smaller. Particularly, when the amount of the data exceeds , the variance is smaller than .\n###figure_11### ###figure_12### ###figure_13### ###figure_14### ###figure_15###"
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "6",
|
| 85 |
+
"parent_section_id": null,
|
| 86 |
+
"section_name": "Proofs",
|
| 87 |
+
"text": ""
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "6.1",
|
| 91 |
+
"parent_section_id": "6",
|
| 92 |
+
"section_name": "Proof of Theorem 3.1.",
|
| 93 |
+
"text": "Let be the probability density function of that starts from , then it satisfies the following PDE\nwith initial data .\nLet , then\nSince\nwhere the second equality is due to the mean value theorem, and the third equality is obtained by inserting the equation (32 ###reference_###) for and integrating by parts. Therefore, for\nTherefore, one has\nwhere .\nSince\none has,\n\u220e"
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "6.2",
|
| 97 |
+
"parent_section_id": "6",
|
| 98 |
+
"section_name": "Proof of Theorem 3.2",
|
| 99 |
+
"text": "Note that the true value function and the -th order PhiBE solution satisfies\nFirst, by the following lemma, one can bound with .\nFor function and satisfies,\nthe distance between and can be bounded by\n(See Section 6.2.1 ###reference_.SSS1### for the proof of the above lemma.)\nTherefore, one has\nThen by the following lemma, one can further bound .\nThe distance between defined in (12 ###reference_###) and the true dynamics can be bounded by\nwhere is defined in (15 ###reference_###), and\n(See Section 6.2.2 ###reference_.SSS2### for the proof of the above lemma.) Hence, one completes the proof by applying the above lemma to (35 ###reference_###)"
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "6.2.1",
|
| 103 |
+
"parent_section_id": "6.2",
|
| 104 |
+
"section_name": "6.2.1 Proof of Lemma 6.1",
|
| 105 |
+
"text": "By Feynman\u2013Kac theorem, it is equivalently to write as,\nHence,\nwhere\nSubtracting the two equations in (38 ###reference_###) and multiplying it with gives\nwhere the mean value theorem is used in the first inequality. This implies\nInserting the above inequality back to (37 ###reference_###) gives\nAssigning to the above inequality completes the proof."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "6.2.2",
|
| 109 |
+
"parent_section_id": "6.2",
|
| 110 |
+
"section_name": "6.2.2 Proof of Lemma 6.2",
|
| 111 |
+
"text": "By Taylor expansion, one has\nwith . Inserting it into gives,\nwhere the last equality is due to the definition of in (14 ###reference_###). Since\none has\nSince\nthen as long as is bounded, one has"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "6.3",
|
| 115 |
+
"parent_section_id": "6",
|
| 116 |
+
"section_name": "Proof of Theorem 3.3",
|
| 117 |
+
"text": "The first-order PhiBE solution satisfies\nBy setting , one can write equivalently as\nwhich yields,\nwhere the second equality is obtained by applying the integral residual of Taylor expansion, and the third equality is obtained by setting .\nSince\none has\nNext, we prove the upper bound for BE solution in the linear dynamics.\nSince\nwhich implies,\n\u220e"
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "6.4",
|
| 121 |
+
"parent_section_id": "6",
|
| 122 |
+
"section_name": "Proof of Theorem 3.4",
|
| 123 |
+
"text": "We first present the property of the operator that will be frequently used later in the following Proposition.\nFor the operator defined in (4 ###reference_###), under Assumption 1 ###reference_umption1###/(a), one has\nwhere is defined in (39 ###reference_###) depending on the first derivatives of .\nInserting the operator , and applying integral by parts gives,\nwhere the last inequality is because of the definition of the stationary solution (19 ###reference_###) and the positivity of the matrix in Assumption 1 ###reference_umption1###.\nFor the second part of the Lemma, first note that\nTherefore, applying the first part of the Lemma gives\nwhere\nFor the last two inequalities, one notes\nBy bounding the last term differently,\none ends up with the last two inequalities of the Lemma.\n\u220e\nNow we are ready to prove Theorem 3.4 ###reference_theorem4###.\nBy (33 ###reference_###), one has\nwhere the Jensen\u2019s inequality is used.\nBy (34 ###reference_###), one has for\nwhere\nNote that both and satisfies\nBy Proposition 6.3 ###reference_theorem3###, one has\nwhich implies,\nTherefore, one has\nInserting it back to (40 ###reference_###) yields,\nwhich completes the proof.\n\u220e"
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "6.5",
|
| 127 |
+
"parent_section_id": "6",
|
| 128 |
+
"section_name": "Proof of Theorem 3.5",
|
| 129 |
+
"text": "First note that satisfies,\nBy the following Lemma, one can bound by the distance between and .\nFor satisfying\nunder Assumption 1 ###reference_umption1###/(a), if , , and , one has\n(See Section 6.5.1 ###reference_.SSS1### for the proof of the above lemma)\nThen we further apply the following lemma regarding the distance between and .\nUnder Assumption 1 ###reference_umption1###, for defined in (18 ###reference_###), one has\nand\nwhere are constants depending on defined in (46 ###reference_###), (49 ###reference_###), (51 ###reference_###), respectively.\n(See Section 6.5.2 ###reference_.SSS2### for the proof of the above lemma) Combine the above two lemmas, one can bound\nfor\nwith defined in (46 ###reference_###), (49 ###reference_###), (57 ###reference_###).\nFurthermore, by the following lemma on the upper bound for ,\nUnder Assumption 1 ###reference_umption1###/(a), for satisfying (16 ###reference_###), one has\nwhere is a constant defined in (39 ###reference_###) that depends on .\n(See Section 6.5.3 ###reference_.SSS3### for the proof of the above lemma)\none has,\nwhere\nwith defined in (46 ###reference_###), (49 ###reference_###), (57 ###reference_###), (20 ###reference_###), (39 ###reference_###).\nFirst note that\nwhere is defined in (32 ###reference_###).\nBy Taylor\u2019s expansion, one has\nInserting the above equation into (52 ###reference_###) yields,\nBy the definition of , the first part can simplified to\nApply integration by parts, the second part can be written as\nwhich completes the proof.\n\u220e\nNote that satisfies the following forward Kolmogorov equation [19 ###reference_b19###],\nMultiplying to the above equation and let , with Assumption 1 ###reference_umption1###/(a), one has\nOn the other hand, satisfies\nMultiplying to the above equation and then summing it over gives,\nAdding the above inequality to (53 ###reference_###) with and defined in (39 ###reference_###), one has\nLet , then . Let , then satisfying , with . Since , by comparison theorem, one has for\nwhich completes the proof for the first inequality.\nFor the second , first note that it satisfies the following PDE,\nMultiplying it with and letting gives,\nLet , then,\nSimilarly, by comparison theorem, , which implies,\nwhere the last equality is obtained by selecting .\nOn the other hand, taking to (55 ###reference_###) and multiply to it gives,\nAdding the above inequality to (54 ###reference_###) with and defined in (39 ###reference_###), one has\nLet , then\nBy the comparison theorem, one has , which implies\n\u220e"
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"section_id": "6.5.1",
|
| 133 |
+
"parent_section_id": "6.5",
|
| 134 |
+
"section_name": "6.5.1 Proof of Lemma 6.4",
|
| 135 |
+
"text": "Substracting the second equation from the first one and let gives,\nMultiply the above equation with and integrate it over , one has,\nwhere the first and third equations in Proposition 6.3 ###reference_theorem3### are used for the first inequality, are used for the second inequality, and , , .\nUnder the assumption that , one has\nUnder the assumption that , one has\nwhich yields,"
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"section_id": "6.5.2",
|
| 139 |
+
"parent_section_id": "6.5",
|
| 140 |
+
"section_name": "6.5.2 Proof of Lemma 6.5",
|
| 141 |
+
"text": "The proof of Lemma 6.5 ###reference_theorem5### replies the following two lemmas, which we will prove later.\nDefine operator with defined in (14 ###reference_###) and , then\nFor with driven by the SDE (2 ###reference_###), then under Assumption 1 ###reference_umption1###/(a), one has\nFor with driven by the SDE (2 ###reference_###), then under Assumption 1 ###reference_umption1###/(a), one has\nNow we are ready to prove Lemma 6.5 ###reference_theorem5###.\nBy Lemma 6.7 ###reference_theorem7###, one has\nwhich implies that\nwhere\nTo prove the second inequality in the lemma, first apply Lemma 6.7 ###reference_theorem7###, one has\nNote that\nis a function that only depends on the derivative up to -th order, which can be bounded under Assumption 1 ###reference_umption1###/(b).\nThus applying the second inequality of Lemma 6.8 ###reference_theorem8### yields\nHence, one has,\nwhere for are used in the first inequality.\nPlugging the above inequality back to (47 ###reference_###) implies\nwhere\nTo prove the third inequality, one first takes to (47 ###reference_###),\nwhere\nwith defined in (48 ###reference_###).\nTherefore, by the first and third inequalities in Lemma 6.8 ###reference_theorem8###, and denoting , one has\nwhere\nwith defined in (46 ###reference_###), (48 ###reference_###), (39 ###reference_###).\nFirst note that\nwhere is defined in (32 ###reference_### ###reference_###).\nBy Taylor\u2019s expansion, one has\nInserting the above equation into (52 ###reference_### ###reference_###) yields,\nBy the definition of , the first part can simplified to\nApply integration by parts, the second part can be written as\nwhich completes the proof.\n\u220e\nNote that satisfies the following forward Kolmogorov equation [19 ###reference_b19### ###reference_b19###],\nMultiplying to the above equation and let , with Assumption 1 ###reference_umption1### ###reference_umption1###/(a), one has\nOn the other hand, satisfies\nMultiplying to the above equation and then summing it over gives,\nAdding the above inequality to (53 ###reference_### ###reference_###) with and defined in (39 ###reference_### ###reference_###), one has\nLet , then . Let , then satisfying , with . Since , by comparison theorem, one has for\nwhich completes the proof for the first inequality.\nFor the second , first note that it satisfies the following PDE,\nMultiplying it with and letting gives,\nLet , then,\nSimilarly, by comparison theorem, , which implies,\nwhere the last equality is obtained by selecting .\nOn the other hand, taking to (55 ###reference_### ###reference_###) and multiply to it gives,\nAdding the above inequality to (54 ###reference_### ###reference_###) with and defined in (39 ###reference_### ###reference_###), one has\nLet , then\nBy the comparison theorem, one has , which implies\n\u220e"
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"section_id": "6.5.3",
|
| 145 |
+
"parent_section_id": "6.5",
|
| 146 |
+
"section_name": "6.5.3 Proof of Lemma 6.6",
|
| 147 |
+
"text": "Based on Proposition 6.3 ###reference_theorem3###, one has\nMultiplying to the first inequality and adding it to the second one gives\nwhich implies\nFor the second inequality,\nLet , then by Lemma 6.8 ###reference_theorem8###, one has\nSimilar to the estimate of the in Lemma 6.2 ###reference_theorem2###, one has,\nwhere\nwith defined in (46 ###reference_###), (48 ###reference_###), (39 ###reference_###).\nTaking to (45 ###reference_###) gives,\nwith\nBy Lemma 6.8 ###reference_theorem8###, one has,\nwith\nTherefore,"
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"section_id": "6.6",
|
| 151 |
+
"parent_section_id": "6",
|
| 152 |
+
"section_name": "Proof of Theorem 4.1",
|
| 153 |
+
"text": "The -th approximation can be divided into two parts,\nwhere could be any functions in the linear space spanned by .\nNote that satisfies\nwhich can be divided into two parts,\nsubtract the above equation from the Galerkin equation (21 ###reference_###) gives\nLet , then multiplying to the above equation yields,\nWhen is bounded, by applying the last inequality of Lemma 6.3 ###reference_theorem3### and Lemma 6.5 ###reference_theorem5###, one has\nwhere\nwith defined in (46 ###reference_###), (49 ###reference_###), (57 ###reference_###).\nUnder the assumption that , i.e.,\nwith defined in (46 ###reference_###), (49 ###reference_###), (57 ###reference_###), one has\nwhich implies that\nSince the above inequality holds for all in the linear space spanned by , therefore,\nwhere\nwith defined in (60 ###reference_###).\nWhen is not bounded, by applying the last second inequality of Lemma 6.3 ###reference_theorem3### and Lemma 6.5 ###reference_theorem5### to (59 ###reference_###), one has\nwhere\nwith defined in (46 ###reference_###), (49 ###reference_###), (57 ###reference_###), (20 ###reference_###).\nSince\nwhere is the smallest eigenvalue of the matrix , where . Note that the matrix is always positive definite when are linear independent bases w.r.t. the weighted norm. let , then (63 ###reference_###) can be rewritten as\nwhere\nWhen , i.e.,\nwith defined in (46 ###reference_###), (49 ###reference_###), (57 ###reference_###),\nthen one has\nwhich implies that\nSince the above inequality holds for all in the linear space spanned by , therefore,\nwhere\nwith defined in (60 ###reference_###), (64 ###reference_###)."
|
| 154 |
+
},
|
| 155 |
+
{
|
| 156 |
+
"section_id": "7",
|
| 157 |
+
"parent_section_id": null,
|
| 158 |
+
"section_name": "Conclusion",
|
| 159 |
+
"text": "In this paper, we introduce PhiBE, a PDE-based Bellman equation that integrates discrete-time information into continuous-time PDEs. The new BE outperforms the classical Bellman equation in approximating the continuous-time policy evaluation problem, particularly in scenarios where underlying dynamics evolve slowly. Importantly, the approximation error of PhiBE depends on the dynamics, making it more robust against changes in reward structures. This property allows greater flexibility in designing reward functions to effectively achieve RL objectives.\nFurthermore, we propose higher-order PhiBE, which offers superior approximations to the true value function. One can achieve the same error with sparse data, enhancing the learning efficiency.\nThis paper serves as the first step for using PhiBE in continuous-time RL, laying the groundwork for future research directions. Specifically, we defer the investigation of the degenerate diffusion case and sample complexity considerations for subsequent studies. Additionally, our methodology can also be extended to encompass broader RL settings."
|
| 160 |
+
}
|
| 161 |
+
],
|
| 162 |
+
"appendix": [],
|
| 163 |
+
"tables": {},
|
| 164 |
+
"image_paths": {
|
| 165 |
+
"1(a)": {
|
| 166 |
+
"figure_path": "2405.12535v2_figure_1(a).png",
|
| 167 |
+
"caption": "(a) \u0394\u2062t=5,\u03b2=0.1,V\u2062(s)=cos3\u2061(s)formulae-sequence\u0394\ud835\udc615formulae-sequence\ud835\udefd0.1\ud835\udc49\ud835\udc60superscript3\ud835\udc60{\\Delta t}=5,\\beta=0.1,V(s)=\\cos^{3}(s)roman_\u0394 italic_t = 5 , italic_\u03b2 = 0.1 , italic_V ( italic_s ) = roman_cos start_POSTSUPERSCRIPT 3 end_POSTSUPERSCRIPT ( italic_s ).\nFigure 1: Here the data are collected every \u0394\u2062t\u0394\ud835\udc61{\\Delta t}roman_\u0394 italic_t unite of time, \u03b2\ud835\udefd\\betaitalic_\u03b2 is the discount coefficient, and V\u2062(s)\ud835\udc49\ud835\udc60V(s)italic_V ( italic_s ) is the true value function. In our setting, a larger discount coefficient indicates that future rewards are discounted more. LSTD [4] is a popular RL algorithm for linear function approximation. The PhiBE is proposed in Section 3 and the algorithm is proposed in Section 4.",
|
| 168 |
+
"url": "http://arxiv.org/html/2405.12535v2/x1.png"
|
| 169 |
+
},
|
| 170 |
+
"1(b)": {
|
| 171 |
+
"figure_path": "2405.12535v2_figure_1(b).png",
|
| 172 |
+
"caption": "(b) \u0394\u2062t=0.5,\u03b2=0.1,V\u2062(s)=cos3\u2061(10\u2062s)formulae-sequence\u0394\ud835\udc610.5formulae-sequence\ud835\udefd0.1\ud835\udc49\ud835\udc60superscript310\ud835\udc60{\\Delta t}=0.5,\\beta=0.1,\\\\\nV(s)=\\cos^{3}(10s)roman_\u0394 italic_t = 0.5 , italic_\u03b2 = 0.1 , italic_V ( italic_s ) = roman_cos start_POSTSUPERSCRIPT 3 end_POSTSUPERSCRIPT ( 10 italic_s ).\nFigure 1: Here the data are collected every \u0394\u2062t\u0394\ud835\udc61{\\Delta t}roman_\u0394 italic_t unite of time, \u03b2\ud835\udefd\\betaitalic_\u03b2 is the discount coefficient, and V\u2062(s)\ud835\udc49\ud835\udc60V(s)italic_V ( italic_s ) is the true value function. In our setting, a larger discount coefficient indicates that future rewards are discounted more. LSTD [4] is a popular RL algorithm for linear function approximation. The PhiBE is proposed in Section 3 and the algorithm is proposed in Section 4.",
|
| 173 |
+
"url": "http://arxiv.org/html/2405.12535v2/x2.png"
|
| 174 |
+
},
|
| 175 |
+
"1(c)": {
|
| 176 |
+
"figure_path": "2405.12535v2_figure_1(c).png",
|
| 177 |
+
"caption": "(c) \u0394\u2062t=0.1,\u03b2=10,V\u2062(s)=cos3\u2061(s)formulae-sequence\u0394\ud835\udc610.1formulae-sequence\ud835\udefd10\ud835\udc49\ud835\udc60superscript3\ud835\udc60{\\Delta t}=0.1,\\beta=10,V(s)=\\cos^{3}(s)roman_\u0394 italic_t = 0.1 , italic_\u03b2 = 10 , italic_V ( italic_s ) = roman_cos start_POSTSUPERSCRIPT 3 end_POSTSUPERSCRIPT ( italic_s ).\nFigure 1: Here the data are collected every \u0394\u2062t\u0394\ud835\udc61{\\Delta t}roman_\u0394 italic_t unite of time, \u03b2\ud835\udefd\\betaitalic_\u03b2 is the discount coefficient, and V\u2062(s)\ud835\udc49\ud835\udc60V(s)italic_V ( italic_s ) is the true value function. In our setting, a larger discount coefficient indicates that future rewards are discounted more. LSTD [4] is a popular RL algorithm for linear function approximation. The PhiBE is proposed in Section 3 and the algorithm is proposed in Section 4.",
|
| 178 |
+
"url": "http://arxiv.org/html/2405.12535v2/x3.png"
|
| 179 |
+
},
|
| 180 |
+
"2(a)": {
|
| 181 |
+
"figure_path": "2405.12535v2_figure_2(a).png",
|
| 182 |
+
"caption": "(a) Nonlinear dynamics (24) with \u0394\u2062t=5,\u03b2=0.1,k=1,\u03bb=0.1formulae-sequence\u0394\ud835\udc615formulae-sequence\ud835\udefd0.1formulae-sequence\ud835\udc581\ud835\udf060.1{\\Delta t}=5,\\beta=0.1,k=1,\\lambda=0.1roman_\u0394 italic_t = 5 , italic_\u03b2 = 0.1 , italic_k = 1 , italic_\u03bb = 0.1\nFigure 2: The PhiBE solution and the BE solution, when the discrete-time transition dynamics are given, are plotted in solid lines. The approximated PhiBE solution based on Algorithm 1 and the approximated BE solution based on LSTD, when discrete-time data is given, are plotted in dash lines. Both algorithms utilize the same data points.",
|
| 183 |
+
"url": "http://arxiv.org/html/2405.12535v2/x4.png"
|
| 184 |
+
},
|
| 185 |
+
"2(b)": {
|
| 186 |
+
"figure_path": "2405.12535v2_figure_2(b).png",
|
| 187 |
+
"caption": "(b) Nonlinear dynamics (24) with \u0394\u2062t=0.1,\u03b2=10,k=10,\u03bb=5formulae-sequence\u0394\ud835\udc610.1formulae-sequence\ud835\udefd10formulae-sequence\ud835\udc5810\ud835\udf065{\\Delta t}=0.1,\\beta=10,k=10,\\lambda=5roman_\u0394 italic_t = 0.1 , italic_\u03b2 = 10 , italic_k = 10 , italic_\u03bb = 5\nFigure 2: The PhiBE solution and the BE solution, when the discrete-time transition dynamics are given, are plotted in solid lines. The approximated PhiBE solution based on Algorithm 1 and the approximated BE solution based on LSTD, when discrete-time data is given, are plotted in dash lines. Both algorithms utilize the same data points.",
|
| 188 |
+
"url": "http://arxiv.org/html/2405.12535v2/x5.png"
|
| 189 |
+
},
|
| 190 |
+
"2(c)": {
|
| 191 |
+
"figure_path": "2405.12535v2_figure_2(c).png",
|
| 192 |
+
"caption": "(c) Nonlinear dynamics (24) with \u0394\u2062t=0.1,\u03b2=10,k=10,\u03bb=2formulae-sequence\u0394\ud835\udc610.1formulae-sequence\ud835\udefd10formulae-sequence\ud835\udc5810\ud835\udf062{\\Delta t}=0.1,\\beta=10,k=10,\\lambda=2roman_\u0394 italic_t = 0.1 , italic_\u03b2 = 10 , italic_k = 10 , italic_\u03bb = 2\nFigure 2: The PhiBE solution and the BE solution, when the discrete-time transition dynamics are given, are plotted in solid lines. The approximated PhiBE solution based on Algorithm 1 and the approximated BE solution based on LSTD, when discrete-time data is given, are plotted in dash lines. Both algorithms utilize the same data points.",
|
| 193 |
+
"url": "http://arxiv.org/html/2405.12535v2/x6.png"
|
| 194 |
+
},
|
| 195 |
+
"3(a)": {
|
| 196 |
+
"figure_path": "2405.12535v2_figure_3(a).png",
|
| 197 |
+
"caption": "(a) Linear dynamics (24).\nFigure 3: The L2superscript\ud835\udc3f2L^{2}italic_L start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT error (28) of the PhiBE solutions and the BE solutions with decreasing \u0394\u2062t\u0394\ud835\udc61{\\Delta t}roman_\u0394 italic_t are plotted in the left two figures. The L2superscript\ud835\udc3f2L^{2}italic_L start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT error (28) of the approximated PhiBE solutions and the approximated BE solutions with increasing amount of data collected every \u0394\u2062t=5\u0394\ud835\udc615{\\Delta t}=5roman_\u0394 italic_t = 5 unit of time are plotted in the right two figures. We set \u03bb=0.05,\u03b2=0.1,k=1formulae-sequence\ud835\udf060.05formulae-sequence\ud835\udefd0.1\ud835\udc581\\lambda=0.05,\\beta=0.1,k=1italic_\u03bb = 0.05 , italic_\u03b2 = 0.1 , italic_k = 1 in both linear and nonlinear cases.",
|
| 198 |
+
"url": "http://arxiv.org/html/2405.12535v2/x7.png"
|
| 199 |
+
},
|
| 200 |
+
"3(b)": {
|
| 201 |
+
"figure_path": "2405.12535v2_figure_3(b).png",
|
| 202 |
+
"caption": "(b) Nonlinear dynamics (24)\nFigure 3: The L2superscript\ud835\udc3f2L^{2}italic_L start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT error (28) of the PhiBE solutions and the BE solutions with decreasing \u0394\u2062t\u0394\ud835\udc61{\\Delta t}roman_\u0394 italic_t are plotted in the left two figures. The L2superscript\ud835\udc3f2L^{2}italic_L start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT error (28) of the approximated PhiBE solutions and the approximated BE solutions with increasing amount of data collected every \u0394\u2062t=5\u0394\ud835\udc615{\\Delta t}=5roman_\u0394 italic_t = 5 unit of time are plotted in the right two figures. We set \u03bb=0.05,\u03b2=0.1,k=1formulae-sequence\ud835\udf060.05formulae-sequence\ud835\udefd0.1\ud835\udc581\\lambda=0.05,\\beta=0.1,k=1italic_\u03bb = 0.05 , italic_\u03b2 = 0.1 , italic_k = 1 in both linear and nonlinear cases.",
|
| 203 |
+
"url": "http://arxiv.org/html/2405.12535v2/x8.png"
|
| 204 |
+
},
|
| 205 |
+
"3(c)": {
|
| 206 |
+
"figure_path": "2405.12535v2_figure_3(c).png",
|
| 207 |
+
"caption": "(c) Linear dynamics (24).\nFigure 3: The L2superscript\ud835\udc3f2L^{2}italic_L start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT error (28) of the PhiBE solutions and the BE solutions with decreasing \u0394\u2062t\u0394\ud835\udc61{\\Delta t}roman_\u0394 italic_t are plotted in the left two figures. The L2superscript\ud835\udc3f2L^{2}italic_L start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT error (28) of the approximated PhiBE solutions and the approximated BE solutions with increasing amount of data collected every \u0394\u2062t=5\u0394\ud835\udc615{\\Delta t}=5roman_\u0394 italic_t = 5 unit of time are plotted in the right two figures. We set \u03bb=0.05,\u03b2=0.1,k=1formulae-sequence\ud835\udf060.05formulae-sequence\ud835\udefd0.1\ud835\udc581\\lambda=0.05,\\beta=0.1,k=1italic_\u03bb = 0.05 , italic_\u03b2 = 0.1 , italic_k = 1 in both linear and nonlinear cases.",
|
| 208 |
+
"url": "http://arxiv.org/html/2405.12535v2/x9.png"
|
| 209 |
+
},
|
| 210 |
+
"3(d)": {
|
| 211 |
+
"figure_path": "2405.12535v2_figure_3(d).png",
|
| 212 |
+
"caption": "(d) Nonlinear dynamics (24).\nFigure 3: The L2superscript\ud835\udc3f2L^{2}italic_L start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT error (28) of the PhiBE solutions and the BE solutions with decreasing \u0394\u2062t\u0394\ud835\udc61{\\Delta t}roman_\u0394 italic_t are plotted in the left two figures. The L2superscript\ud835\udc3f2L^{2}italic_L start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT error (28) of the approximated PhiBE solutions and the approximated BE solutions with increasing amount of data collected every \u0394\u2062t=5\u0394\ud835\udc615{\\Delta t}=5roman_\u0394 italic_t = 5 unit of time are plotted in the right two figures. We set \u03bb=0.05,\u03b2=0.1,k=1formulae-sequence\ud835\udf060.05formulae-sequence\ud835\udefd0.1\ud835\udc581\\lambda=0.05,\\beta=0.1,k=1italic_\u03bb = 0.05 , italic_\u03b2 = 0.1 , italic_k = 1 in both linear and nonlinear cases.",
|
| 213 |
+
"url": "http://arxiv.org/html/2405.12535v2/x10.png"
|
| 214 |
+
},
|
| 215 |
+
"4(a)": {
|
| 216 |
+
"figure_path": "2405.12535v2_figure_4(a).png",
|
| 217 |
+
"caption": "(a) Stochastic dynamics (29) with \u0394\u2062t=1,\u03b2=0.1,k=1formulae-sequence\u0394\ud835\udc611formulae-sequence\ud835\udefd0.1\ud835\udc581{\\Delta t}=1,\\beta=0.1,k=1roman_\u0394 italic_t = 1 , italic_\u03b2 = 0.1 , italic_k = 1.\nFigure 4: \nThe PhiBE solution and the BE solution, when the discrete-time transition dynamics are given, are plotted in solid lines. The approximated PhiBE solution based on Algorithm 2 and the approximated BE solution based on LSTD, when discrete-time data is given, are plotted in dash lines. Both algorithms utilize the same data points.",
|
| 218 |
+
"url": "http://arxiv.org/html/2405.12535v2/x11.png"
|
| 219 |
+
},
|
| 220 |
+
"4(b)": {
|
| 221 |
+
"figure_path": "2405.12535v2_figure_4(b).png",
|
| 222 |
+
"caption": "(b) Stochastic dynamics (29) with \u0394\u2062t=0.1,\u03b2=0.1,k=5formulae-sequence\u0394\ud835\udc610.1formulae-sequence\ud835\udefd0.1\ud835\udc585{\\Delta t}=0.1,\\beta=0.1,k=5roman_\u0394 italic_t = 0.1 , italic_\u03b2 = 0.1 , italic_k = 5.\nFigure 4: \nThe PhiBE solution and the BE solution, when the discrete-time transition dynamics are given, are plotted in solid lines. The approximated PhiBE solution based on Algorithm 2 and the approximated BE solution based on LSTD, when discrete-time data is given, are plotted in dash lines. Both algorithms utilize the same data points.",
|
| 223 |
+
"url": "http://arxiv.org/html/2405.12535v2/x12.png"
|
| 224 |
+
},
|
| 225 |
+
"4(c)": {
|
| 226 |
+
"figure_path": "2405.12535v2_figure_4(c).png",
|
| 227 |
+
"caption": "(c) Stochastic dynamics (29) with \u0394\u2062t=0.1,\u03b2=10,k=1formulae-sequence\u0394\ud835\udc610.1formulae-sequence\ud835\udefd10\ud835\udc581{\\Delta t}=0.1,\\beta=10,k=1roman_\u0394 italic_t = 0.1 , italic_\u03b2 = 10 , italic_k = 1.\nFigure 4: \nThe PhiBE solution and the BE solution, when the discrete-time transition dynamics are given, are plotted in solid lines. The approximated PhiBE solution based on Algorithm 2 and the approximated BE solution based on LSTD, when discrete-time data is given, are plotted in dash lines. Both algorithms utilize the same data points.",
|
| 228 |
+
"url": "http://arxiv.org/html/2405.12535v2/x13.png"
|
| 229 |
+
},
|
| 230 |
+
"5(a)": {
|
| 231 |
+
"figure_path": "2405.12535v2_figure_5(a).png",
|
| 232 |
+
"caption": "(a) Model error w.r.t. the discretization time \u0394\u2062t\u0394\ud835\udc61{\\Delta t}roman_\u0394 italic_t.\nFigure 5: The L2superscript\ud835\udc3f2L^{2}italic_L start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT error (28) of the PhiBE solutions and the BE solutions with decreasing \u0394\u2062t\u0394\ud835\udc61{\\Delta t}roman_\u0394 italic_t are plotted in (a). The L2superscript\ud835\udc3f2L^{2}italic_L start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT error (28) of the approximated PhiBE solutions and the approximated BE solutions with increasing amount of data collected every \u0394\u2062t=5\u0394\ud835\udc615{\\Delta t}=5roman_\u0394 italic_t = 5 unit of time are plotted in (b). We set \u03b2=0.1,k=1formulae-sequence\ud835\udefd0.1\ud835\udc581\\beta=0.1,k=1italic_\u03b2 = 0.1 , italic_k = 1 in both figures.",
|
| 233 |
+
"url": "http://arxiv.org/html/2405.12535v2/x14.png"
|
| 234 |
+
},
|
| 235 |
+
"5(b)": {
|
| 236 |
+
"figure_path": "2405.12535v2_figure_5(b).png",
|
| 237 |
+
"caption": "(b) Approximation error w.r.t the amount of data.\nFigure 5: The L2superscript\ud835\udc3f2L^{2}italic_L start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT error (28) of the PhiBE solutions and the BE solutions with decreasing \u0394\u2062t\u0394\ud835\udc61{\\Delta t}roman_\u0394 italic_t are plotted in (a). The L2superscript\ud835\udc3f2L^{2}italic_L start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT error (28) of the approximated PhiBE solutions and the approximated BE solutions with increasing amount of data collected every \u0394\u2062t=5\u0394\ud835\udc615{\\Delta t}=5roman_\u0394 italic_t = 5 unit of time are plotted in (b). We set \u03b2=0.1,k=1formulae-sequence\ud835\udefd0.1\ud835\udc581\\beta=0.1,k=1italic_\u03b2 = 0.1 , italic_k = 1 in both figures.",
|
| 238 |
+
"url": "http://arxiv.org/html/2405.12535v2/x15.png"
|
| 239 |
+
}
|
| 240 |
+
},
|
| 241 |
+
"validation": true,
|
| 242 |
+
"references": [
|
| 243 |
+
{
|
| 244 |
+
"1": {
|
| 245 |
+
"title": "Logarithmic regret for episodic continuous-time linear-quadratic\nreinforcement learning over a finite-time horizon.",
|
| 246 |
+
"author": "Matteo Basei, Xin Guo, Anran Hu, and Yufei Zhang.",
|
| 247 |
+
"venue": "The Journal of Machine Learning Research, 23(1):8015\u20138048,\n2022.",
|
| 248 |
+
"url": null
|
| 249 |
+
}
|
| 250 |
+
},
|
| 251 |
+
{
|
| 252 |
+
"2": {
|
| 253 |
+
"title": "Regularity of invariant measures: the case of non-constant diffusion\npart.",
|
| 254 |
+
"author": "Vladimir I Bogachev, N Krylov, and Michael R\u00f6ckner.",
|
| 255 |
+
"venue": "journal of functional analysis, 138(1):223\u2013242, 1996.",
|
| 256 |
+
"url": null
|
| 257 |
+
}
|
| 258 |
+
},
|
| 259 |
+
{
|
| 260 |
+
"3": {
|
| 261 |
+
"title": "End to end learning for self-driving cars.",
|
| 262 |
+
"author": "Mariusz Bojarski, Davide Del Testa, Daniel Dworakowski, Bernhard Firner, Beat\nFlepp, Prasoon Goyal, Lawrence D Jackel, Mathew Monfort, Urs Muller, Jiakai\nZhang, et al.",
|
| 263 |
+
"venue": "arXiv preprint arXiv:1604.07316, 2016.",
|
| 264 |
+
"url": null
|
| 265 |
+
}
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"4": {
|
| 269 |
+
"title": "Linear least-squares algorithms for temporal difference learning.",
|
| 270 |
+
"author": "Steven J Bradtke and Andrew G Barto.",
|
| 271 |
+
"venue": "Machine learning, 22:33\u201357, 1996.",
|
| 272 |
+
"url": null
|
| 273 |
+
}
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"5": {
|
| 277 |
+
"title": "Model-based control of soft robots: A survey of the state of the art\nand open challenges.",
|
| 278 |
+
"author": "Cosimo Della Santina, Christian Duriez, and Daniela Rus.",
|
| 279 |
+
"venue": "IEEE Control Systems Magazine, 43(3):30\u201365, 2023.",
|
| 280 |
+
"url": null
|
| 281 |
+
}
|
| 282 |
+
},
|
| 283 |
+
{
|
| 284 |
+
"6": {
|
| 285 |
+
"title": "Reinforcement learning in continuous time and space.",
|
| 286 |
+
"author": "Kenji Doya.",
|
| 287 |
+
"venue": "Neural computation, 12(1):219\u2013245, 2000.",
|
| 288 |
+
"url": null
|
| 289 |
+
}
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"7": {
|
| 293 |
+
"title": "Steady states of fokker\u2013planck equations: I. existence.",
|
| 294 |
+
"author": "Wen Huang, Min Ji, Zhenxin Liu, and Yingfei Yi.",
|
| 295 |
+
"venue": "Journal of Dynamics and Differential Equations, 27:721\u2013742,\n2015.",
|
| 296 |
+
"url": null
|
| 297 |
+
}
|
| 298 |
+
},
|
| 299 |
+
{
|
| 300 |
+
"8": {
|
| 301 |
+
"title": "Policy evaluation and temporal-difference learning in continuous time\nand space: A martingale approach.",
|
| 302 |
+
"author": "Yanwei Jia and Xun Yu Zhou.",
|
| 303 |
+
"venue": "The Journal of Machine Learning Research, 23(1):6918\u20136972,\n2022.",
|
| 304 |
+
"url": null
|
| 305 |
+
}
|
| 306 |
+
},
|
| 307 |
+
{
|
| 308 |
+
"9": {
|
| 309 |
+
"title": "Policy gradient and actor-critic learning in continuous time and\nspace: Theory and algorithms.",
|
| 310 |
+
"author": "Yanwei Jia and Xun Yu Zhou.",
|
| 311 |
+
"venue": "The Journal of Machine Learning Research, 23(1):12603\u201312652,\n2022.",
|
| 312 |
+
"url": null
|
| 313 |
+
}
|
| 314 |
+
},
|
| 315 |
+
{
|
| 316 |
+
"10": {
|
| 317 |
+
"title": "q-learning in continuous time.",
|
| 318 |
+
"author": "Yanwei Jia and Xun Yu Zhou.",
|
| 319 |
+
"venue": "Journal of Machine Learning Research, 24(161):1\u201361, 2023.",
|
| 320 |
+
"url": null
|
| 321 |
+
}
|
| 322 |
+
},
|
| 323 |
+
{
|
| 324 |
+
"11": {
|
| 325 |
+
"title": "Model-based reinforcement learning for approximate optimal\nregulation.",
|
| 326 |
+
"author": "Rushikesh Kamalapurkar, Patrick Walters, and Warren E Dixon.",
|
| 327 |
+
"venue": "Automatica, 64:94\u2013104, 2016.",
|
| 328 |
+
"url": null
|
| 329 |
+
}
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"12": {
|
| 333 |
+
"title": "Reinforcement learning in robotics: A survey.",
|
| 334 |
+
"author": "Jens Kober, J Andrew Bagnell, and Jan Peters.",
|
| 335 |
+
"venue": "The International Journal of Robotics Research,\n32(11):1238\u20131274, 2013.",
|
| 336 |
+
"url": null
|
| 337 |
+
}
|
| 338 |
+
},
|
| 339 |
+
{
|
| 340 |
+
"13": {
|
| 341 |
+
"title": "Actor-critic algorithms.",
|
| 342 |
+
"author": "Vijay Konda and John Tsitsiklis.",
|
| 343 |
+
"venue": "Advances in neural information processing systems, 12, 1999.",
|
| 344 |
+
"url": null
|
| 345 |
+
}
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"14": {
|
| 349 |
+
"title": "Kinematic and dynamic vehicle models for autonomous driving control\ndesign.",
|
| 350 |
+
"author": "Jason Kong, Mark Pfeiffer, Georg Schildbach, and Francesco Borrelli.",
|
| 351 |
+
"venue": "In 2015 IEEE intelligent vehicles symposium (IV), pages\n1094\u20131099. IEEE, 2015.",
|
| 352 |
+
"url": null
|
| 353 |
+
}
|
| 354 |
+
},
|
| 355 |
+
{
|
| 356 |
+
"15": {
|
| 357 |
+
"title": "Policy iterations for reinforcement learning problems in continuous\ntime and space\u2014fundamental theory and methods.",
|
| 358 |
+
"author": "Jaeyoung Lee and Richard S Sutton.",
|
| 359 |
+
"venue": "Automatica, 126:109421, 2021.",
|
| 360 |
+
"url": null
|
| 361 |
+
}
|
| 362 |
+
},
|
| 363 |
+
{
|
| 364 |
+
"16": {
|
| 365 |
+
"title": "Human-level control through deep reinforcement learning.",
|
| 366 |
+
"author": "Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A Rusu, Joel Veness,\nMarc G Bellemare, Alex Graves, Martin Riedmiller, Andreas K Fidjeland, Georg\nOstrovski, et al.",
|
| 367 |
+
"venue": "nature, 518(7540):529\u2013533, 2015.",
|
| 368 |
+
"url": null
|
| 369 |
+
}
|
| 370 |
+
},
|
| 371 |
+
{
|
| 372 |
+
"17": {
|
| 373 |
+
"title": "Stochastic differential equations: an introduction with\napplications.",
|
| 374 |
+
"author": "Bernt Oksendal.",
|
| 375 |
+
"venue": "Springer Science & Business Media, 2013.",
|
| 376 |
+
"url": null
|
| 377 |
+
}
|
| 378 |
+
},
|
| 379 |
+
{
|
| 380 |
+
"18": {
|
| 381 |
+
"title": "Training language models to follow instructions with human feedback.",
|
| 382 |
+
"author": "Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela\nMishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al.",
|
| 383 |
+
"venue": "Advances in Neural Information Processing Systems,\n35:27730\u201327744, 2022.",
|
| 384 |
+
"url": null
|
| 385 |
+
}
|
| 386 |
+
},
|
| 387 |
+
{
|
| 388 |
+
"19": {
|
| 389 |
+
"title": "Stochastic processes and applications.",
|
| 390 |
+
"author": "Grigorios A Pavliotis.",
|
| 391 |
+
"venue": "Springer, 2016.",
|
| 392 |
+
"url": null
|
| 393 |
+
}
|
| 394 |
+
},
|
| 395 |
+
{
|
| 396 |
+
"20": {
|
| 397 |
+
"title": "Off-policy temporal-difference learning with function approximation.",
|
| 398 |
+
"author": "Doina Precup, Richard S Sutton, and Sanjoy Dasgupta.",
|
| 399 |
+
"venue": "In ICML, pages 417\u2013424, 2001.",
|
| 400 |
+
"url": null
|
| 401 |
+
}
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"21": {
|
| 405 |
+
"title": "Mastering the game of go with deep neural networks and tree search.",
|
| 406 |
+
"author": "David Silver, Aja Huang, Chris J Maddison, Arthur Guez, Laurent Sifre, George\nVan Den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Veda\nPanneershelvam, Marc Lanctot, et al.",
|
| 407 |
+
"venue": "nature, 529(7587):484\u2013489, 2016.",
|
| 408 |
+
"url": null
|
| 409 |
+
}
|
| 410 |
+
},
|
| 411 |
+
{
|
| 412 |
+
"22": {
|
| 413 |
+
"title": "Multidimensional diffusion processes, volume 233.",
|
| 414 |
+
"author": "Daniel W Stroock and SR Srinivasa Varadhan.",
|
| 415 |
+
"venue": "Springer Science & Business Media, 1997.",
|
| 416 |
+
"url": null
|
| 417 |
+
}
|
| 418 |
+
},
|
| 419 |
+
{
|
| 420 |
+
"23": {
|
| 421 |
+
"title": "Reinforcement learning: An introduction.",
|
| 422 |
+
"author": "Richard S Sutton and Andrew G Barto.",
|
| 423 |
+
"venue": "MIT press, 2018.",
|
| 424 |
+
"url": null
|
| 425 |
+
}
|
| 426 |
+
},
|
| 427 |
+
{
|
| 428 |
+
"24": {
|
| 429 |
+
"title": "Policy gradient methods for reinforcement learning with function\napproximation.",
|
| 430 |
+
"author": "Richard S Sutton, David McAllester, Satinder Singh, and Yishay Mansour.",
|
| 431 |
+
"venue": "Advances in neural information processing systems, 12, 1999.",
|
| 432 |
+
"url": null
|
| 433 |
+
}
|
| 434 |
+
},
|
| 435 |
+
{
|
| 436 |
+
"25": {
|
| 437 |
+
"title": "Making deep q-learning methods robust to time discretization.",
|
| 438 |
+
"author": "Corentin Tallec, L\u00e9onard Blier, and Yann Ollivier.",
|
| 439 |
+
"venue": "In International Conference on Machine Learning, pages\n6096\u20136104. PMLR, 2019.",
|
| 440 |
+
"url": null
|
| 441 |
+
}
|
| 442 |
+
},
|
| 443 |
+
{
|
| 444 |
+
"26": {
|
| 445 |
+
"title": "Online actor\u2013critic algorithm to solve the continuous-time infinite\nhorizon optimal control problem.",
|
| 446 |
+
"author": "Kyriakos G Vamvoudakis and Frank L Lewis.",
|
| 447 |
+
"venue": "Automatica, 46(5):878\u2013888, 2010.",
|
| 448 |
+
"url": null
|
| 449 |
+
}
|
| 450 |
+
},
|
| 451 |
+
{
|
| 452 |
+
"27": {
|
| 453 |
+
"title": "Reinforcement learning in continuous time and space: A stochastic\ncontrol approach.",
|
| 454 |
+
"author": "Haoran Wang, Thaleia Zariphopoulou, and Xun Yu Zhou.",
|
| 455 |
+
"venue": "The Journal of Machine Learning Research, 21(1):8145\u20138178,\n2020.",
|
| 456 |
+
"url": null
|
| 457 |
+
}
|
| 458 |
+
},
|
| 459 |
+
{
|
| 460 |
+
"28": {
|
| 461 |
+
"title": "Borrowing from the future: Addressing double sampling in model-free\ncontrol.",
|
| 462 |
+
"author": "Yuhua Zhu, Zachary Izzo, and Lexing Ying.",
|
| 463 |
+
"venue": "In Mathematical and Scientific Machine Learning, pages\n1099\u20131136. PMLR, 2022.",
|
| 464 |
+
"url": null
|
| 465 |
+
}
|
| 466 |
+
},
|
| 467 |
+
{
|
| 468 |
+
"29": {
|
| 469 |
+
"title": "Borrowing from the future: An attempt to address double sampling.",
|
| 470 |
+
"author": "Yuhua Zhu and Lexing Ying.",
|
| 471 |
+
"venue": "In Mathematical and scientific machine learning, pages\n246\u2013268. PMLR, 2020.",
|
| 472 |
+
"url": null
|
| 473 |
+
}
|
| 474 |
+
},
|
| 475 |
+
{
|
| 476 |
+
"30": {
|
| 477 |
+
"title": "Fine-tuning language models from human preferences.",
|
| 478 |
+
"author": "Daniel M Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B Brown, Alec Radford, Dario\nAmodei, Paul Christiano, and Geoffrey Irving.",
|
| 479 |
+
"venue": "arXiv preprint arXiv:1909.08593, 2019.",
|
| 480 |
+
"url": null
|
| 481 |
+
}
|
| 482 |
+
}
|
| 483 |
+
],
|
| 484 |
+
"url": "http://arxiv.org/html/2405.12535v2"
|
| 485 |
+
}
|
20240819/2406.13213v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20240819/2407.15622v2.json
ADDED
|
@@ -0,0 +1,154 @@
| 1 |
+
{
|
| 2 |
+
"title": "HyperSurf: Quadruped Robot Leg Capable of Surface Recognition with GRU and Real-to-Sim Transferring The authors are with the Intelligent Space Robotics Laboratory, CDE, Skolkovo Institute of Science and Technology, Bolshoy Boulevard 30, bld. 1, 121205, Moscow, Russia Email: {Sergei.Satsevich, Yaroslav.Savotin, Danil.Belov, Elizaveta.Pestova, Artem.Erkhov, Batyr.Khabibullin, Artem.Bazhenov, Vyacheslav.Kovalev, Aleksey.Fedoseev, Dzmitry.Tsetserukou}@skoltech.ru",
|
| 3 |
+
"abstract": "This paper introduces a system of data collection acceleration and real-to-sim transferring for surface recognition on a quadruped robot. The system features a mechanical single-leg setup capable of stepping on various easily interchangeable surfaces. Additionally, it incorporates a GRU-based Surface Recognition System, inspired by the system detailed in the DogSurf paper [1]. This setup facilitates the expansion of dataset collection for model training, enabling data acquisition from hard-to-reach surfaces in laboratory conditions. Furthermore, it opens avenues for transferring surface properties from reality to simulation, thereby allowing the training of optimal gaits for legged robots in simulation environments using a pre-prepared library of digital twins of surfaces. Moreover, enhancements have been made to the GRU-based Surface Recognition System, allowing for the integration of data from both the quadruped robot and the single-leg setup. The dataset and code have been made publicly available.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Quadruped robots get a lot of attention nowadays, due to their potential advantage over wheeled robots to navigate unstructured off-road terrains and urban environments where people live and work. This ability opens a lot of potential applications in different spheres like autonomous inspection, agriculture [2 ###reference_b2###], delivery [3 ###reference_b3###], assisting for visually impaired people, space exploration. However, performing some tasks demands perception of different terrains and consequent adeptness to them or awareness of the user about the type of a surface.\nEmploying quadruped robots as assistants requires an effective system, which is able to evaluate whether a traversed surface is dangerous or not in urban environments. One of the crucial criteria in this case is the robot\u2019s ability to evaluate surface slipperiness. A slippery surface significantly increases the risk of severe injuries, e.g., hip and spine cord fractures from falls. The statistic estimations suggest that hip fracture rates will nearly double by 2050 compared to 2023 levels [4 ###reference_b4###]. Consequently, an assistant quadruped robot must possess the advanced intelligence to warn people about slippery surfaces.\n###figure_1### Investigating the surfaces of celestial bodies involves traversing regolith, which is the predominant material covering the surfaces of Mars, the Moon, and various other celestial bodies. The regolith is often loose, granular, and heterogeneous, posing challenges for traversal with wheeled robots. [5 ###reference_b5###]. Therefore, using quadruped robots to explore the complex surfaces of celestial bodies is rational due to their ability to function and traverse intricate landscapes. However, locomotion in complex environment demands from legged robots to adapt their behavior to different parameters of surfaces, which stimulate to develop different approaches for surface recognition.\nOur method builds upon the DogSurf [1 ###reference_b1###] surface recognition system and expands the effectiveness of its method. In this paper, we have shifted our approach to dataset collection from gathering data by walking on a quadruped robot to collecting data in a laboratory setting while the robot stands on a single leg. This method offers advantages in terms of speed, simplicity, and cost-effectiveness, enabling faster data collection and experimentation, because our new one-leg setup does not require investment of manpower and time for datasets collections. Our GRU-based system offers a cost-effective solution that can be easily implemented across a wide range of legged robotics. Utilizing this setup would facilitate the collection of datasets, which could then be transferred to a quadruped robot or in simulation environment allowing to develop optimal locomotion patterns in simulation. By incorporating feedback from IMU sensor systems and adjusting parameters, this approach can enhance the adaptability of legged robots.\nIn this work, we have expanded our research by systematizing the creation of digital surface duplicates within the Gazebo simulation environment, exemplified through the replication of a floor tile surface. The current state of simulation environments commonly used by the robotics community, such as Gazebo and NVIDIA\u2019s Isaac, lacks extensive libraries of pre-defined surface types that accurately reflect the diversity of real-world terrains. 
This deficiency presents a significant challenge during the Sim-to-Real deployment phase, where discrepancies between simulated and actual surfaces often lead to substantial errors in robot behavior [6 ###reference_b6###]. Walking robots, particularly quadrupeds, face difficulties when transitioning from controlled simulation environments to the complexities of real-world surfaces, which can differ greatly from the standard options available in these simulations. Through the development and implementation of these digital surface duplicates, we aim to reduce the trial-and-error phase associated with surface recognition and locomotion in physical environments, thereby streamlining the path from simulation to real-world deployment."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "II Related Works",
|
| 15 |
+
"text": "DogSurf is a novel method employing quadruped robots to aid visually impaired individuals in real-world navigation. This approach empowers the quadruped robot to identify slippery surfaces and provide auditory and tactile cues to signal the user when to halt. Additionally, a cutting-edge GRU-based neural network architecture boasting a mean accuracy of 99.925% was introduced for the multiclass surface classification task tailored specifically for quadruped robots. The dataset used for training was gathered using a Unitree Go1 Edu robot.\nIn addition to DogSurf, several other methodologies for surface recognition are available or currently employed in quadruped robot locomotion.\nFirst approach involves employing various tactile estimation methods. Weerakkodi et al. [7 ###reference_b7###] introduced the utilization of Touch Sensitive Foot (TSF) tactile sensors, positioned on the dog\u2019s feet to gather data, subsequently processed by Convolutional Neural Networks (CNN). This method enables the attainment of a validation accuracy of 74.37% and a peak recognition score of 90% for line patterns. Despite its potential, this approach faces several limitations, such as the high service cost due to direct sensor contact with the surface, leading to gradual wear and tear over time.\nSecond approach involves utilizing audio data. Dimiccoli et al. [8 ###reference_b8###] employed a gripper to manipulate objects and captured audio signals, subsequently utilized for CNN training, resulting in approximately 85% accuracy. Another successful attempt was made by Vangen et al. [9 ###reference_b9###], who suggested employing a sensorized paw for audio-based terrain classification, achieving around 78% accuracy. However, this method exhibits limited real-world applicability due to the diverse ambient sounds present, which notably degrade the neural network\u2019s accuracy.\nThird approach involves utilizing Force/Torque (F/T) sensor data either independently or in conjunction with the aforementioned methods. Bednarek et al. [10 ###reference_b10###] advocated for the utilization of data solely from F/T sensors on the quadrupedal robot ANYmal [11 ###reference_b11###]. Additionally, they introduced a novel CNN-1d convolution architecture and devised a clustering method for terrain classification, achieving an accuracy of 93%. Another method employing a Transformer architecture was proposed in [12 ###reference_b12###] and attained an accuracy of 97.33% on a QCAT dataset. Kolvenbach et al. [13 ###reference_b13###] recommended combining F/T and IMU sensors for the ANYmal robot and developed a specialized test stand, achieving an accuracy of 98.6%.\nOn average, the IMU-based approach yields superior results and offers advantages such as no direct environmental contact, resilience to weather and time-of-day variations, and cost-effectiveness. This method has been successfully implemented in a walking robot utilizing gated recurrent units (GRU) [14 ###reference_b14###]."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "III System Overview",
|
| 21 |
+
"text": ""
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "III-A Hardware Architecture",
|
| 27 |
+
"text": "To conduct experiments to determine surface types, a special experimental setup was created to ensure repeatability and uniformity of the data obtained when interacting with various surface types. The creation of the stand significantly simplified and reduced the cost of the experiment, as it eliminated the need to use a full-fledged quadruped robot. The main components of the stand (1 ###reference_###) include a full-size leg of a quadruped robot with three degrees of freedom, mounted on a vertical stand, control electronics, an IMU sensor, and the surface for investigation.\n###figure_2### The leg mounting system is designed to allow the leg to move smoothly along the vertical guides, ensuring both stability and consistent, repeatable movements. The vertical guide mechanism is essential for controlling the range of motion, preventing lateral deviations, and ensuring that each motion, particularly the jumps, follows the intended trajectory. The leg\u2019s movements are regulated by the electronic control unit, which sends commands to coordinate the bending and extension of the joints. This coordinated action enables the leg to perform jumping motions, lifting off the surface by approximately 20 mm. These jumps are vital for allowing the robot to interact with the surface under investigation, gathering information without excessive force or disruption. At the end of the leg, an Inertial Measurement Unit (IMU) sensor, specifically the HWT905-RS485 model, is attached. This sensor plays a crucial role in the feedback loop by providing real-time data on the leg\u2019s position, orientation, and movement. The IMU allows the system to monitor the performance of each jump.\nThe leg was designed and manufactured as part of a project focused on developing an autonomous quadruped robotic system. It consists of 3 movable joints and 3 BLDC electric motors that drive its movement. The joint in contact with the surface is equipped with a resilient silicone tip to cushion the contact.\nThe control of electric motors is performed using an algorithm in the micro-ROS environment on the Intel NUC microcomputer. This same computer is responsible for collecting data from the IMU sensor. The entire system is designed for reliable and stable reproduction of the jumping pattern specified by the user on the surface under investigation."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "III-B HyperSurf Control System",
|
| 33 |
+
"text": "In the control system of the HyperSurf robotic leg, a combination of ROS2 and micro-ROS technologies governing its movements. Within the ROS2 package, specific leg trajectories are defined within Python scrips encapsulated in ROS2 packages. These trajectories, created to facilitate leg movement, are translated into motor angles, shaping the physical actions of the robotic limb. The computations required for these calculations were executed on an Intel NUC computer.\nOnce the trajectory calculations are finalized, they are converted into motor angles by an inverse kinematics algorithm. These angles, along with velocity and the coefficients - desired position stiffness and - velocity gain, are transmitted to the leg actuators via the /Hyper dog joints/Commands topic. The stiffness and damping coefficients were selected in advance, taking into account the load on each motor. They determine how quickly the desired position will be achieved and how rigidly this position will be held when external forces are applied. Sending data to the motors is feasible due to the created custom ROS2 message. The data is transmitted in three separate messages due to the leg\u2019s configuration with three motors, each motor possessing a unique identifier (can_id). This enables the distinction between signals sent over a single CAN bus.\nThe micro-ROS agent facilitates the seamless interaction between the high-level computation environment and the low-level hardware control. This agent establishes a vital link between the ROS2 ecosystem running on the computer and the microcontroller system managing the hardware. The /joints/Commands topic published by the ROS2 environment is subscribed to by the /STM_node. The micro-ROS agent processes these commands and translates them into Pulse Width Modulation (PWM) signals, which are then sent to the BLDC motor drivers. By facilitating this communication, the micro-ROS agent ensures that commands from the ROS2 system are accurately and efficiently executed by the hardware, allowing for precise control of the robotic system\u2019s movements and operations.\nHyperSurf not only executes the prescribed leg trajectories but also interfaces with motor encoders, capturing critical feedback on the limb\u2019s position and velocity. This encoder data is meticulously processed, providing insights into the leg\u2019s current state and ensuring precise control over its motion."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.3",
|
| 37 |
+
"parent_section_id": "3",
|
| 38 |
+
"section_name": "III-C Conducting the Experiment",
|
| 39 |
+
"text": "To verify the effectiveness of surface recognition using feedback from the IMU sensor, a diverse set of surfaces with distinct properties was assembled for testing. The robotic leg\u2019s interaction was assessed on four different types of surfaces: an EVA polymer sheet (A), a textured ceramic surface (B), a smooth enamel-coated ceramic tile (C), and a carpet (D).\nThe setup ensured that the leg stand remained stationary to avoid any vibrations that might interfere with the performance of the neural network. One of the surfaces was then placed underneath the robotic leg, ensuring it stayed immobile. These jumping tests lasted between 5 to 7 minutes on each surface to gather sufficient data, repeating this process four times for each surface. The surfaces were changed and the jump frequency was varied, ranging from one jump every 0.8 - 2 seconds. Data from the IMU sensor was recorded using its API throughout the experiment. This data was subsequently processed and transformed to prepare it for the training of the neural network, ensuring that the model could accurately recognize and differentiate between the various surfaces based on the sensor feedback."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.4",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "III-D Dataset Structure",
|
| 45 |
+
"text": "Throughout the study, a dataset comprising over 1,000,000 samples (equivalent to approximately 1.5 hours of data) was amassed. Data collection encompassed four classes: a sheet of EVA polymer, a textured ceramic surface, a smooth enamel-coated ceramic tile surface, and carpet. During the collection process, the stand operated in automatic stepping mode, varying its frequency to ensure an adequate diversity of data. This variation in frequency was crucial for ensuring that the dataset captured a wide range of interaction scenarios, providing the necessary diversity to improve the robustness and generalization of the subsequent analysis. The large volume of data collected from these different surfaces was critical for training and refining the surface recognition algorithms used in the study."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.5",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "III-E GRU-based Surface Recognition",
|
| 51 |
+
"text": "Our previous method, DogSurf module utilizes a GRU neural network as the foundation. The IMU data from the accelerometer and gyroscope is collected and appended to a queue containing 100 samples. During each iteration, a Forward Pass is conducted through the first Standard Scaler, then through Principal Component Analysis (PCA) transform, and then through bidirectional GRU neural network.\n###figure_3### Code and other supplementary materials can be accessed by link: https://github.com/Alfasaz/HyperSurf. [15 ###reference_b15###] The results of DogSurf module performance of the HyperSurf compared to the State-of-the-Art are listed in Table I ###reference_###.\nFor HyperSerf we get the new results which represented in Table II ###reference_### and confusion matrix in Fig. 4 ###reference_###.\n###figure_4### The outstanding results of surface recognition open new perspectives of utilizing quadruped robots both as companions, and also in complex landscapes."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "IV Creating a Digital Twin of HyperSurf System",
|
| 57 |
+
"text": "To conduct the stand simulation in the Gazebo environment, a digital twin was created based on the following steps:"
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.1",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "IV-A Preparation of the stand description file",
|
| 63 |
+
"text": "The three-dimensional model of the stand was meticulously crafted within Computer-Aided Design (CAD) software, leveraging a simplified rendition of the complete quadruped robot designed in Siemens NX. This comprehensive model encompasses all integral components essential for the stand\u2019s functionality.\nTo seamlessly integrate the CAD model into the Gazebo simulator, a Unified Robot Description Format (URDF) was meticulously generated. Crafted through Fusion 360, this URDF file intricately delineates all stand components, their interactions, and kinematic properties. Notably, the URDF generation process was facilitated by a Python script available at the repository [18 ###reference_b18###].\nIn line with the real-world stand setup, an IMU sensor was seamlessly incorporated into the dog\u2019s leg using ROS tools. This addition faithfully mirrors the physical stand\u2019s configuration, thereby enhancing the fidelity and realism of the simulation environment.\nFurthermore, to accurately simulate various surfaces such as tiles, rubber, and brick within the Gazebo environment, meticulous specifications were established. These surface characteristics were detailed comprehensively in the URDF file, with each parameter meticulously tuned to replicate the physical attributes of the real-world surfaces. Key parameters, including the friction coefficient, damping coefficient, and stiffness coefficient, were precisely calibrated to ensure that the simulation dynamics closely mirror the actual conditions. The process of transferring data on surface properties from the real world to the simulation involved careful calibration of these coefficients. The goal was to align the virtual IMU data collected during simulated experiments (leg jumps) with the IMU data recorded in real-world tests. This iterative calibration process ensures that the virtual environment accurately reflects the behavior of the physical surfaces, allowing for a realistic and reliable simulation of surface interactions."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.2",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "IV-B Kinematics Control Implementation",
|
| 69 |
+
"text": "Leg kinematics control was realized using symbolic and numerical computation techniques. Forward kinematics determined leg component positions, while inverse kinematics was used to adjust the joint angles to achieve specific end-effector positions. Symbolic computation was performed using SymPy, which facilitated the calculation of transformation matrices and coordinates essential for understanding the geometric relationships between different parts of the leg. CasADi was utilized for numerical optimization, handling the task of adjusting joint angles to meet the required end-effector positions with high precision. This integrated approach, combining symbolic and numerical methods, enabled fine-tuned control of the leg\u2019s movements. It ensured effective interaction with various surfaces and environments within the Gazebo simulator, allowing for robust data collection and analysis."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.3",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "IV-C Dataset Collection",
|
| 75 |
+
"text": "In the Gazebo simulator, utilizing the digital twin of the stand, a dataset was gathered to train the surface recognition model. Similar to the real stand, data from the IMU sensor was collected and analyzed. This process aimed to demonstrate the transfer ability of data between the physical and simulated environments, showcasing the bidirectional capability of real-to-simulation and simulation-to-real data integration.\n###figure_5###"
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "5",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "Novelty of HyperSurf System",
|
| 81 |
+
"text": "The HyperSurf system presents several innovative advancements that significantly enhance the capabilities of quadruped robot leg surface recognition, both in real-world and simulated environments. The key novelties of the system can be categorized into three primary areas: boosting the speed and efficiency of dataset collection, optimizing the workforce required for data acquisition, and enhancing the accuracy and transferability of data between physical and simulated environments."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "5.1",
|
| 85 |
+
"parent_section_id": "5",
|
| 86 |
+
"section_name": "Boosting Dataset Collection Efficiency",
|
| 87 |
+
"text": "One of the most notable innovations of the HyperSurf system is its ability to accelerate the process of dataset collection. Traditionally, collecting datasets for surface recognition involved running extensive tests with a fully operational quadruped robot, which is both time-consuming and resource-intensive. By contrast, HyperSurf introduces a one-leg test setup that significantly streamlines this process. On average, the GRU analysis speed was boosted by a factor of 7.5. This remarkable increase in efficiency is primarily due to the rapid deployment of the system and its capability to operate autonomously in controlled laboratory conditions. The reduction in complexity and the focus on a single leg not only speeds up the data collection process but also allows for more frequent iterations and refinements, leading to a faster development cycle."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "5.2",
|
| 91 |
+
"parent_section_id": "5",
|
| 92 |
+
"section_name": "Optimizing Workforce for Dataset Collection",
|
| 93 |
+
"text": "The HyperSurf system also demonstrates a significant reduction in the manpower required for dataset collection. In previous approaches, data collection was a labor-intensive process that often required the involvement of more than five team members, particularly due to the extensive duration of the experiments and the need to manage a complex, fully assembled quadruped robot. HyperSurf, however, simplifies the experimental setup by utilizing a static, one-leg configuration in a laboratory environment. This simplification not only shortens the time required for each dataset collection session but also reduces the number of personnel needed. In this study, the entire data collection process was efficiently conducted by just two researchers. This reduction in team size not only lowers operational costs but also minimizes the coordination challenges often associated with larger teams, further streamlining the research and development process.\n###figure_6###"
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "5.3",
|
| 97 |
+
"parent_section_id": "5",
|
| 98 |
+
"section_name": "Enhancing Data Transfer and Simulation Accuracy",
|
| 99 |
+
"text": "A critical advancement introduced by the HyperSurf system is the improved transferability of data between physical and simulated environments. By leveraging data from an IMU sensor placed on the stand, the HyperSurf system enables empirical modeling of surfaces within the Gazebo simulation environment. As shown in Fig. 5 ###reference_###, this approach facilitates the creation of highly accurate digital twins of physical surfaces. The accuracy of this modeling is highlighted in Fig. 6 ###reference_###, where a comparison between the angular velocities during jumps of the simulated model and the real robotic leg is presented. The results demonstrate a high degree of correlation, with the simulation achieving an accuracy of 98% relative to the real-world data."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "6",
|
| 103 |
+
"parent_section_id": null,
|
| 104 |
+
"section_name": "VI Conclusion and Future Work",
|
| 105 |
+
"text": "The HyperSurf technology comprises a specialized leg test setup designed to enhance surface recognition capabilities in quadruped robots. It integrates components such as a mechanical leg with three degrees of freedom, BLDC motors for movement, and an IMU sensor for feedback collection. This setup assists the DogSurf classification model in achieving a precision of up to 98%.\nFuture work will focus on demonstrating how the HyperSurf surface recognition system can enhance quadruped platforms. Feedback from the GRU, processed with the DogSurf architecture, will be utilized to adapt gaits to different surfaces, adjust BLDC motor parameters, and incorporate additional mechanics to improve energy efficiency and terrain traversal. Furthermore, the development of the quadruped platform [15 ###reference_b15###] will continue, integrating the system to enhance its capabilities based on surface recognition feedback. Additionally, efforts will be made to extend the system to object grasping, leveraging surface property information to improve grasping effectiveness.\nIn summary, the HyperSurf technology not only marks a significant milestone in surface recognition for quadruped robots but also sets the stage for broader applications across industries such as search and rescue, agriculture, and industrial inspection. This research underscores the potential of integrating advanced sensory and control systems to enable robust and adaptive autonomous systems capable of navigating complex environments with precision and reliability."
|
| 106 |
+
}
|
| 107 |
+
],
|
| 108 |
+
"appendix": [],
|
| 109 |
+
"tables": {
|
| 110 |
+
"1": {
|
| 111 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">TABLE I: </span>Comparison of HyperSurf with the State-of-the-art Approaches.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S3.T1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T1.1.1.1\">\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_r\" id=\"S3.T1.1.1.1.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.1.1.1.2\">Model</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.1.1.1.3\">Accuracy</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T1.1.2.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.1.2.1.1\">Tactile</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.2.1.2\">Weerakkodi et al. <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2407.15622v2#bib.bib7\" title=\"\">7</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.2.1.3\">0.74370</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.3.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.1.3.2.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S3.T1.1.3.2.1.1\">Audio</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.3.2.2\">Dimiccoli et al. <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2407.15622v2#bib.bib8\" title=\"\">8</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.3.2.3\">0.84700</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.4.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.4.3.1\">Vangen et al. <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2407.15622v2#bib.bib9\" title=\"\">9</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.4.3.2\">0.77900</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.5.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.1.5.4.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S3.T1.1.5.4.1.1\">F/T</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.5.4.2\">HAPTR2 <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2407.15622v2#bib.bib12\" title=\"\">12</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.5.4.3\">0.97330</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.6.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.6.5.1\">Jakub Bednarek et al. <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2407.15622v2#bib.bib10\" title=\"\">10</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.6.5.2\">0.93590</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.7.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.1.7.6.1\">F/T & IMU</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.7.6.2\">Kolvenbach et al. 
<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2407.15622v2#bib.bib13\" title=\"\">13</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.7.6.3\">0.98600</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.8.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.1.8.7.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S3.T1.1.8.7.1.1\">IMU</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.8.7.2\">Lomio et al. <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2407.15622v2#bib.bib16\" title=\"\">16</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.8.7.3\">0.64950</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.9.8\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.9.8.1\">Singh et al. <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2407.15622v2#bib.bib17\" title=\"\">17</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.9.8.2\">0.88000</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.10.9\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T1.1.10.9.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S3.T1.1.10.9.1.1\">DogSurf</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T1.1.10.9.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S3.T1.1.10.9.2.1\">0.99925</span></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 112 |
+
"capture": "TABLE I: Comparison of HyperSurf with the State-of-the-art Approaches."
|
| 113 |
+
},
|
| 114 |
+
"2": {
|
| 115 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">TABLE II: </span>Surface Recognition by HyperSurf</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S3.T2.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T2.1.1.1\">\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_r\" id=\"S3.T2.1.1.1.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T2.1.1.1.2\">Precision</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T2.1.1.1.3\">Recall</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T2.1.1.1.4\">F1 score</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T2.1.2.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T2.1.2.1.1\">Carpet</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T2.1.2.1.2\">0.99467</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T2.1.2.1.3\">0.98902</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T2.1.2.1.4\">0.99183</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.3.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T2.1.3.2.1\">Rubber</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T2.1.3.2.2\">0.98032</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T2.1.3.2.3\">0.99032</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T2.1.3.2.4\">0.98529</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.4.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T2.1.4.3.1\">Tile</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T2.1.4.3.2\">0.99695</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T2.1.4.3.3\">0.99506</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T2.1.4.3.4\">0.99600</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.5.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T2.1.5.4.1\">Rough tile</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T2.1.5.4.2\">1.00000</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T2.1.5.4.3\">1.00000</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T2.1.5.4.4\">1.00000</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 116 |
+
"capture": "TABLE II: Surface Recognition by HyperSurf"
|
| 117 |
+
}
|
| 118 |
+
},
|
| 119 |
+
"image_paths": {
|
| 120 |
+
"1": {
|
| 121 |
+
"figure_path": "2407.15622v2_figure_1.png",
|
| 122 |
+
"caption": "Figure 1: The experimental stand consists of a leg, fixing stand, control electronics, IMU sensor, and the surface sample under investigation.",
|
| 123 |
+
"url": "http://arxiv.org/html/2407.15622v2/extracted/5800016/Stand.jpg"
|
| 124 |
+
},
|
| 125 |
+
"2": {
|
| 126 |
+
"figure_path": "2407.15622v2_figure_2.png",
|
| 127 |
+
"caption": "Figure 2: HyperSurf system overview, including modules and interfaces developed for surface recognition.",
|
| 128 |
+
"url": "http://arxiv.org/html/2407.15622v2/extracted/5800016/System_overview.jpg"
|
| 129 |
+
},
|
| 130 |
+
"3": {
|
| 131 |
+
"figure_path": "2407.15622v2_figure_3.png",
|
| 132 |
+
"caption": "Figure 3: Neural network architecture.",
|
| 133 |
+
"url": "http://arxiv.org/html/2407.15622v2/extracted/5800016/new.png"
|
| 134 |
+
},
|
| 135 |
+
"4": {
|
| 136 |
+
"figure_path": "2407.15622v2_figure_4.png",
|
| 137 |
+
"caption": "Figure 4: Confusion matrix for HyperSurf.",
|
| 138 |
+
"url": "http://arxiv.org/html/2407.15622v2/x1.png"
|
| 139 |
+
},
|
| 140 |
+
"5": {
|
| 141 |
+
"figure_path": "2407.15622v2_figure_5.png",
|
| 142 |
+
"caption": "Figure 5: Digital twins of the one-leg stand in Gazebo simulator.",
|
| 143 |
+
"url": "http://arxiv.org/html/2407.15622v2/extracted/5800016/standGazebo2.png"
|
| 144 |
+
},
|
| 145 |
+
"6": {
|
| 146 |
+
"figure_path": "2407.15622v2_figure_6.png",
|
| 147 |
+
"caption": "Figure 6: Comparison of the foot angular velocities during the jumps of simulated model and the real robotic leg.",
|
| 148 |
+
"url": "http://arxiv.org/html/2407.15622v2/extracted/5800016/comp.png"
|
| 149 |
+
}
|
| 150 |
+
},
|
| 151 |
+
"validation": true,
|
| 152 |
+
"references": [],
|
| 153 |
+
"url": "http://arxiv.org/html/2407.15622v2"
|
| 154 |
+
}
|
20240819/2407.19965v2.json
ADDED
|
@@ -0,0 +1,190 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Simply Trainable Nearest Neighbour Machine Translation with GPU Inference",
|
| 3 |
+
"abstract": "Nearest neighbor machine translation is a successful approach for fast domain adaption, which interpolates the pre-trained transformers with domain-specific token-level k-nearest-neighbor (kNN) retrieval without retraining. Despite kNN MT\u2019s success, searching large reference corpus and fixed interpolation between the kNN and pre-trained model led to computational complexity and translation quality challenges. Among other papers, Dai et al., (2023) proposed methods to obtain a small number of reference samples dynamically for which they introduced a distance-aware interpolation method using an equation that includes free parameters. This paper proposes a simply trainable nearest neighbor machine translation and carry out inference experiments on GPU. Similar to Dai et al., (2023), we first adaptively construct a small datastore for each input sentence. Second, we train a single-layer network for the interpolation coefficient between the knnMT and pre-trained result to automatically interpolate in different domains. Experimental results on different domains show that our proposed method\neither improves or sometimes maintain the translation quality of methods in Dai et al., (2023) while being automatic. In addition, our GPU inference results demonstrate that knnMT can be integrated into GPUs with a drop of only 5% in terms of speed.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Neural Machine Translation (NMT) has been showing an increasing trend of translation quality owing to the ongoing development of deep neural network models Vaswani et al., (2017 ###reference_b10###); Kim et al., (2021 ###reference_b5###). However, the quality of these models is limited as soon as the domain of the input test sentences is different than the training data.\n\u2020\u2020* equal contribution\nTo handle this out-of-domain problem, k-nearest neighbor machine translation (kNN-MT) has proven to be successful in many studies Khandelwal et al., (2021 ###reference_b4###); Zheng et al., 021a ###reference_b12###; Zheng et al., 021b ###reference_b13###; Jiang et al., (2021 ###reference_b3###); Wang et al., (2022 ###reference_b11###); Meng et al., (2022 ###reference_b6###), and thus piqued much attention in the community of machine translation. At the core of kNN-MT, a kNN classifier over an external datastore is built based on cached decoder representations and corresponding target tokens. This classifier is utilized to augment the given NMT model without finetuning leading to improved predictions, especially for domain adaption. Augmenting the NMT model is done via interpolating between the output probability distribution of the NMT model and the kNN classifier output probability distribution.\nDespite kNN-MT\u2019s noticeable success in alleviating the domain adaption problem, vanilla kNN-MT proposed in Khandelwal et al., (2021 ###reference_b4###) mainly suffers two challenges slowing down kNN-MT\u2019s deployment. First, vanilla kNN-MT requires large datastore sizes resulting in massive storage and expensive latency overheads during inference. For example, Khandelwal et al., (2021 ###reference_b4###) showed that kNN-MT is two orders of magnitude slower than the base NMT system in a generation speed when retrieving 64 keys from a datastore containing billions of records. Second, the interpolation between the NMT model and the kNN classifier is fixed for all sentences in the test sets and manually tuned to improve translation quality.\nTechniques in the literature have been proposed to overcome kNN-MT\u2019s challenges. For example, Meng et al., (2022 ###reference_b6###) designed Fast kNN-MT where a subset of the large datastore is created for each source sentence by searching for the nearest token-level neighbors of the source tokens and mapping them to the corresponding target tokens. Building on Meng et al., (2022 ###reference_b6###), Dai et al., (2023 ###reference_b1###) proposed a simple and scalable kNN-MT that leverages current efficient text retrieval mechanisms, such as BM25 Robertson et al., (2009 ###reference_b9###), to obtain a small number of reference samples that have high semantic similarities with the input sentence, and then dynamically construct a tiny datastore by forwarding the samples to the pre-trained model. Dai et al., (2023 ###reference_b1###) successfully introduced a simple distance-aware interpolation equation to adaptively incorporate kNN retrieval results into the NMT model. However, this simple equation required manual tuning. Along the same line, Jiang et al., (2022 ###reference_b2###) proposed a trainable interpolation method but with six-layer neural network. 
To the best of our knowledge, these papers did not integrate kNN-MT into GPU inference to observe the trade-off between accuracy and speed results.\nThis paper proposes a simply trainable nearest neighbor machine translation via a single-layer neural network and demonstrates kNN feasibility with GPU inference. Similar to Dai et al., (2023 ###reference_b1###), we reduce the large datastore size by extracting online a small number of reference samples that have high semantic similarities with the input test sentence using the efficient BM25 retrieval algorithm (Robertson et al., (2009 ###reference_b9###)). Based on these insights, we propose a simply trainable single-layer neural network that adaptively interpolates the NMT and knnMT probability distributions per domain in an average of 40 minutes of a single GPU training time. Last but not least, we integrate kNN-MT into FasterTransformer, a highly optimized NMT GPU inference implementation offered by NVIDIA, and observe its speed and accuracy performance on a sparsely activated large-scale MoE. Experimental results show the translation quality effectiveness of our adaptive and automatic interpolation technique and insignificant speed drop of knnMT on GPU. Contributions of this paper are listed below:\nPropose and develop an adaptive and trainable single-layer neural network for knnMT interpolation that trains in 40 minutes. (Section 3 ###reference_###).\nExperimental results show that the proposed method improves the results of Sk-MT in some domains while maintaining the result in some other domains (Section 4.2 ###reference_###).\nIntegrate knnMT interpolation with GPU inference and introduce the speed-to-accuracy trade-off results. (Section 4.3 ###reference_###).\nThis remainder of this paper is organized as follows: Section 2 ###reference_### introduces knnMT and summarizes Sk-MT paving the road to the proposed method in Section 3 ###reference_###. Section 3 ###reference_### provides the experimental results while finally concluding in Section 5 ###reference_###."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Background: kNN-MT",
|
| 15 |
+
"text": ""
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Vanilla-kNN",
|
| 21 |
+
"text": "In vanilla-kNN, a datastore is created to convert a bilingual sentence into a set of key-value pairs. These keys and values are defined in Equation 1 ###reference_###.\nwhere (x, y) (X, Y) define the reference corpus for which the pretrained NMT model generates the context representation at each time step . Then we collect the output\nhidden state as key and as value to construct the whole datastore .\nAt inference time, the current context representation at decoding step , as well as the already generated words, are leveraged to generate a retrieval distribution over the entire vocabulary:\nP_knn(y_t\u2014x, ^y_\u00a1t) \n\u221d\u2211_(h_i,v_i)\u2208N_tI_y_t=v_i exp(-L2(hi,F(x,^y\u00a1t))T)\nwhere L2 is the Euclidean distance between the current context embedding and the embedding of a token from the data store.\nIn vanilla KNN-MT, a predefined interpolation weight is fixed as a hyperparameter. This weight interpolates between the probability distribution computed from KNN and the probability distribution generated from the pretrained NMT model (see Equation 2.1 ###reference_###).\nP(y_t\u2014x, ^y_\u00a1t)= \u03bb*P_mt( y_t\u2014x, ^y_\u00a1t) \n+\n(1-\u03bb)*P_knn(y_t\u2014x, ^y_\u00a1t)"
|
| 22 |
+
},
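The vanilla kNN-MT computation extracted in Section 2.1 above can be summarized in a short sketch. This is a minimal illustration under our own assumptions, not the authors' implementation: the function names (`knn_distribution`, `interpolate`) are ours, and the neighbor distances are assumed to be pre-computed by the retrieval step.

```python
import torch

def knn_distribution(dists, values, vocab_size, T=100.0):
    """Retrieval distribution over the vocabulary (the P_knn term above).

    dists:  (k,) L2 distances between the decoder state and each retrieved key.
    values: (k,) long tensor of target-token ids stored with each key.
    """
    weights = torch.softmax(-dists / T, dim=-1)   # normalized exp(-L2 / T)
    p_knn = torch.zeros(vocab_size)
    p_knn.scatter_add_(0, values, weights)        # accumulate weight per token id
    return p_knn

def interpolate(p_mt, p_knn, lam):
    """Fixed-weight interpolation between the NMT and kNN distributions."""
    return lam * p_mt + (1.0 - lam) * p_knn
```

Here `T` plays the same smoothing role as the temperature in the retrieval distribution of Section 2.1.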
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "SK-MT",
|
| 27 |
+
"text": "In SK-MT Dai et al., (2023 ###reference_b1###), Elasticsearch is used for semantic retrieval components to create a sentence adaptive datastore instead of a static and extensive datastore used in Vanilla kNN-MT. In specific, Elasticsearch does two main operations: Index & Search; storing parallel sentences in indexes format, and then retrieving 32 sentences per input sentence with the highest relevance score from the training corpus.\nAlso, SK-MT provided a successful way of setting the interpolation coefficient in Equation 2 ###reference_###.\nwhere is the top-1 L2 distance\nduring the nearest neighbor search, is the temperature parameter and is typically fixed."
|
| 28 |
+
},
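Under the reconstruction of Equation 2 given above, the distance-aware coefficient reduces to a one-line function. The formula is our reading of Dai et al., (2023), since the extracted text lost the original equation, so treat this as a sketch rather than the definitive SK-MT implementation.

```python
import torch

def sk_mt_lambda(top1_dist: torch.Tensor, T: float = 100.0) -> torch.Tensor:
    """Distance-aware kNN weight: a small top-1 distance d_0 pushes the
    weight toward 1, a large one toward 0; ReLU clips negative values."""
    return torch.relu(1.0 - top1_dist / T)
```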
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Single-Layer Trainable Interpolation",
|
| 33 |
+
"text": "Even though SK-MT introduced a simple solution that derives the interpolation weight from the distance, a fixed parameter for all datasets is tuned to produce the best results and is recommended. A fixed temperature may not be optimal for all domains and datasets. For example, Table 1 ###reference_### shows the BLEU score from the Koran dataset when varying from 100 to 500 with a step size of 100. As seen in the table, increases the BLEU score providing evidence that the temperature value can vary with the dataset and does not have to be the recommended SK-MT . This observation motivates a simple and trainable method to adaptively find the temperature parameter for each dataset.\nThe proposed simple neural network consists of a single layer trained to predict the interpolation weight given the distance of the retrieved kNN candidates. This is in contrast to other adaptive interpolation methods e.g. Jiang et al., (2022 ###reference_b2###) that use more layers and learnable parameters. We use the development set of each domain to optimize our single-layer network.\nOur training objective is designed to provide better translation quality. Knowing the ground truth token, we can choose the best interpolation weight that produces the best probability distribution that we can get from the interpolation between and . Thus, our final objective is to create a sharper final probability distribution toward our ground truth token.\nAs shown in Algorithm 1 ###reference_thm1###, our training procedure is divided into two stages at each decoding step.\nThe first stage examines the probability of the ground truth token in both distributions and . If the probability of the ground truth token is higher then we set the label to 1 otherwise we set the label to 0. The second stage trains our single-layer network using binary loss."
|
| 34 |
+
},
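A hedged sketch of the single-layer trainable interpolation from Section 3 follows. The label convention (1 when the kNN distribution assigns the ground-truth token higher probability) and the choice of optimizer are assumptions on our part; the paper only specifies a single-layer network trained with a binary cross-entropy loss.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class InterpolationNet(nn.Module):
    """Single linear layer mapping retrieval-distance features to an
    interpolation weight in (0, 1), as described in Section 3."""
    def __init__(self, n_features: int = 1):
        super().__init__()
        self.layer = nn.Linear(n_features, 1)

    def forward(self, dist_features):
        return torch.sigmoid(self.layer(dist_features))

def train_step(net, opt, dist_features, p_mt_gt, p_knn_gt):
    """Stage 1: derive binary labels from which distribution gives the
    ground-truth token more mass; stage 2: binary cross-entropy update."""
    labels = (p_knn_gt > p_mt_gt).float()   # assumed label convention
    lam = net(dist_features).squeeze(-1)
    loss = F.binary_cross_entropy(lam, labels)
    opt.zero_grad()
    loss.backward()
    opt.step()
    return loss.item()
```

A typical setup would be `net = InterpolationNet()` with `opt = torch.optim.Adam(net.parameters(), lr=1e-3)`, iterating over the development set of each domain.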
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Experimental Results",
|
| 39 |
+
"text": ""
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4.1",
|
| 43 |
+
"parent_section_id": "4",
|
| 44 |
+
"section_name": "Experimental Setup",
|
| 45 |
+
"text": "Input stimuli and Datasets: We test our methodology in 2 language directions: German-English (deen), and English-Czech (encs). For deen, we employ the multi-domain dataset as the baseline Khandelwal et al., (2021 ###reference_b4###) in addition to an e-commerce domain. For encs, we utilize two other domains: finance and medpharma. Our evaluation metrics are the SacreBLEU Post, (2018 ###reference_b7###) and COMET-22 (wmt22-COMET-da) Rei et al., (2022 ###reference_b8###), a reference-based metric that combines direct assessments (DA), sentence-level scores, and word-level tags from Multidimensional Quality Metrics (MQM) error annotations.\nModels: Three transformer models are used in our experiments. The first two of the three are used to measure the translation quality; these two are constructed from 12 encoder layers and 12 decoder layers with 512 hidden dimensions and 2048 feedforward layer hidden dimensions with 8 multi-head attention heads. The third transformer is the ZCode M3 model reviewed and presented in Kim et al., (2021 ###reference_b5###). ZCode M3 is constructed from 24 encoder layers and 12 decoder layers with 1024 hidden dimensions and 4096 feedforward layer hidden dimensions with 16 multi-head attention heads. The ZCode M3 has 32 experts, 5B parameters, and 128,000 vocab size.\nBaselines: The model without knnMT is one baseline. To compare to other methods on our inhouse transformers, we utilize the SK-MT method that uses a distance-aware adapter Dai et al., (2023 ###reference_b1###). In Dai et al., (2023 ###reference_b1###), the authors compared with other methods and showed success so we use Dai et al., (2023 ###reference_b1###) as a transitive proxy to compare with other methods.\nGPU Inference Hardware and Environment: Inference and speed evaluation experiments are carried out on a single NVIDIA Tesla V100 GPU. Our inference environment is the highly optimized FasterTransformer from NVIDIA. Without loss of generality, we fix interpolation to and measure the speed."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.2",
|
| 49 |
+
"parent_section_id": "4",
|
| 50 |
+
"section_name": "Trainable kNN Retrieval Results",
|
| 51 |
+
"text": "Table 2 ###reference_### shows the translation quality performance comparison between the proposed trainable method and other baselines. As shown in the table, our proposed trainable method improves the NMT baseline translation quality by a large margin. In addition, the proposed method\nimproves or sometimes maintains the overall translation quality relative to SK-MT on average in terms of the BLEU and COMET\nscores. In some domains like IT and Koran, the proposed method improves the SK-MT performance. This result demonstrates the ability to at least maintain the performance of SK-MT while using a single-layer neural network. Also, these results overall show the adaptability of the proposed method to different datasets. For Medical and medpharma, SK-MT outperforms our proposed method because the datastore built by the dev set does not have any semantic similarity to the training set leading to imbalanced binary labeling, whereas the test does not have this imbalanced binary labeling. To overcome this challenge, we suggest that we add weights to the binary cross-entropy training loss function. With this weighted loss function, our trainable method achieves 57.2 BLEU, 85 COMET in Medical, and 48.1 BLEU, 92.5 COMET in medphrama. These results increase our average results to 48.9 BLEU, and 86.6 COMET, respectively.\nTurning to the translation quality in terms of COMET, we observe that SK-MT still either maintain or improve the quality in the majority of the domains. We also notice that the improvements in BLEU scores do not fully transfer to COMET. We believe that this is due to the fact that COMET is trained on general domain data, therefore it\u2019s less sensitive to domain terminology and more focused on coherence and fluency. For example, e-commerce has an improvement of roughly 6 BLEU points relative to NMT, while the improvement is 0.3 COMET score points."
|
| 52 |
+
},
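Section 4.2 above mentions adding weights to the binary cross-entropy loss to counter the imbalanced labels on Medical and medpharma. A minimal sketch of such a weighted loss follows; the `pos_weight` value is hypothetical, as the paper does not report the weights it used.

```python
import torch
import torch.nn.functional as F

def weighted_bce(pred, labels, pos_weight=2.0):
    """Binary cross-entropy with extra penalty on the minority class."""
    w = torch.where(labels > 0.5,
                    torch.full_like(labels, pos_weight),
                    torch.ones_like(labels))
    return F.binary_cross_entropy(pred, labels, weight=w)
```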
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.3",
|
| 55 |
+
"parent_section_id": "4",
|
| 56 |
+
"section_name": "GPU Inference Results",
|
| 57 |
+
"text": "Table 3 ###reference_### depicts the speed results of ZCode M3 inference and corresponding BLEU scores in three domains under test namely, IT, Medical, and Law. The kNN-MT results for beam=1, batch=1 setting on the large scale MoE improves the NMT baseline with a large margin while dropping the speed by only 5.2% on average. Similarly, kNN-MT has an improved translation quality with only a drop of 7% relative to NMT as beam and batch increase to 2 and 20, respectively. These results show the potential of deploying the knnMT domain adaption approach in such a large-scale model as ZCode M3."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "5",
|
| 61 |
+
"parent_section_id": null,
|
| 62 |
+
"section_name": "Conclusion",
|
| 63 |
+
"text": "This paper proposes a simply single-layer trainable nearest-neighbor machine translation and carries out experiments on large-scale models to demonstrate kNN feasibility with GPU Inference. Experimental results show the translation quality effectiveness of our adaptive and automatic interpolation technique relative to other methods in literature, the training simplicity in 40 mins on single-GPU, and insignificant speed drop of knnMT on GPU inference."
|
| 64 |
+
}
|
| 65 |
+
],
|
| 66 |
+
"appendix": [],
|
| 67 |
+
"tables": {
|
| 68 |
+
"1": {
|
| 69 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T1\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S3.T1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T1.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.1.1.1.1\">Temperature (T)</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.1.1.1.2\">BLEU</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T1.1.2.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.1.2.1.1\">100</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.2.1.2\">15.5</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.3.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.1.3.2.1\">200</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.3.2.2\">15.9</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.4.3\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.1.4.3.1\">300</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.4.3.2\">16.1</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.5.4\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.1.5.4.1\">400</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.1.5.4.2\">15.9</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.6.5\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.1.6.5.1\">500</th>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T1.1.6.5.2\">15.5</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Koran Temperature Variation</figcaption>\n</figure>",
|
| 70 |
+
"capture": "Table 1: Koran Temperature Variation"
|
| 71 |
+
},
|
| 72 |
+
"2": {
|
| 73 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T2\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Translation quality of the proposed method versus other methods at Beam=5 and K=2.</figcaption>\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S3.T2.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T2.1.1.1\">\n<th class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_th ltx_th_column ltx_border_tt ltx_border_t\" id=\"S3.T2.1.1.1.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.1.1.1.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.1.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S3.T2.1.1.1.1.1.1.1\">Domain</span></span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt ltx_border_t\" colspan=\"3\" id=\"S3.T2.1.1.1.2\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T2.1.1.1.2.1\">BLEU</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt ltx_border_t\" colspan=\"3\" id=\"S3.T2.1.1.1.3\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T2.1.1.1.3.1\">WMT22-COMET-da</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.2.2\">\n<th class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_th ltx_th_column\" id=\"S3.T2.1.2.2.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_t\" id=\"S3.T2.1.2.2.2\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T2.1.2.2.2.1\">NMT</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_t\" id=\"S3.T2.1.2.2.3\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T2.1.2.2.3.1\">SK-MT</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_t\" id=\"S3.T2.1.2.2.4\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T2.1.2.2.4.1\">Trainable</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_t\" id=\"S3.T2.1.2.2.5\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T2.1.2.2.5.1\">NMT</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_t\" id=\"S3.T2.1.2.2.6\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T2.1.2.2.6.1\">SK-MT</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_t\" id=\"S3.T2.1.2.2.7\">\n<span class=\"ltx_text 
ltx_font_bold\" id=\"S3.T2.1.2.2.7.1\">Trainable</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T2.1.3.1\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_t\" id=\"S3.T2.1.3.1.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.3.1.1.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.3.1.1.1.1\">IT</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.3.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.3.1.2.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.3.1.2.1.1\">38</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.3.1.3\">45.5 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.3.1.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.3.1.4.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.3.1.4.1.1\">46.1</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.3.1.5\">83.0 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.3.1.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.3.1.6.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.3.1.6.1.1\">85.0</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.3.1.7\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.3.1.7.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.3.1.7.1.1\">85.0</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.4.2\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_t\" id=\"S3.T2.1.4.2.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.4.2.1.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.4.2.1.1.1\">Law</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.4.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.4.2.2.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.4.2.2.1.1\">49.6</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.4.2.3\">62.8 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" 
id=\"S3.T2.1.4.2.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.4.2.4.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.4.2.4.1.1\">62.7</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.4.2.5\">86.7 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.4.2.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.4.2.6.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.4.2.6.1.1\">88.3</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.4.2.7\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.4.2.7.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.4.2.7.1.1\">88.0</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.5.3\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_t\" id=\"S3.T2.1.5.3.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.5.3.1.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.5.3.1.1.1\">Koran</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.5.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.5.3.2.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.5.3.2.1.1\">12.2</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.5.3.3\">15.5 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.5.3.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.5.3.4.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.5.3.4.1.1\">16.4</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.5.3.5\">69.1 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.5.3.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.5.3.6.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.5.3.6.1.1\">70.0</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.5.3.7\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.5.3.7.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.5.3.7.1.1\">70.6</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.6.4\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_t\" id=\"S3.T2.1.6.4.1\">\n<span class=\"ltx_rule\" 
style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.6.4.1.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.6.4.1.1.1\">e-commerce</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.6.4.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.6.4.2.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.6.4.2.1.1\">52.5</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.6.4.3\">58.1 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.6.4.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.6.4.4.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.6.4.4.1.1\">58.5</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.6.4.5\">90.7 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.6.4.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.6.4.6.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.6.4.6.1.1\">90.9</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.6.4.7\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.6.4.7.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.6.4.7.1.1\">90.9</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.7.5\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_t\" id=\"S3.T2.1.7.5.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.7.5.1.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.7.5.1.1.1\">finance</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.7.5.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.7.5.2.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.7.5.2.1.1\">48.6</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.7.5.3\">53.3 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.7.5.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.7.5.4.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.7.5.4.1.1\">53.3</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.7.5.5\">70.6 \u00a0\u00a0<span 
class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.7.5.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.7.5.6.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.7.5.6.1.1\">94.2</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.7.5.7\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.7.5.7.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.7.5.7.1.1\">93.9</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.8.6\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_tt ltx_border_t\" id=\"S3.T2.1.8.6.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.8.6.1.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.8.6.1.1.1\">Medical</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_tt ltx_border_t\" id=\"S3.T2.1.8.6.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.8.6.2.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.8.6.2.1.1\">42.7</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_tt ltx_border_t\" id=\"S3.T2.1.8.6.3\">57.1 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_tt ltx_border_t\" id=\"S3.T2.1.8.6.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.8.6.4.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.8.6.4.1.1\">57.2</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_tt ltx_border_t\" id=\"S3.T2.1.8.6.5\">83.9 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_tt ltx_border_t\" id=\"S3.T2.1.8.6.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.8.6.6.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.8.6.6.1.1\">85.2</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_tt ltx_border_t\" id=\"S3.T2.1.8.6.7\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.8.6.7.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.8.6.7.1.1\">85.0</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.9.7\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_t\" id=\"S3.T2.1.9.7.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.9.7.1.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.9.7.1.1.1\">medpharma</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" 
style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.9.7.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.9.7.2.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.9.7.2.1.1\">41.6</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.9.7.3\">47.4 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.9.7.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.9.7.4.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.9.7.4.1.1\">48.1</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.1.9.7.5\">92.2 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.9.7.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.9.7.6.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.9.7.6.1.1\">92.0</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T2.1.9.7.7\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.9.7.7.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.9.7.7.1.1\">92.5</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.1.10.8\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_bb ltx_border_tt\" id=\"S3.T2.1.10.8.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.10.8.1.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.10.8.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S3.T2.1.10.8.1.1.1.1\">AVERAGE</span></span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_bb ltx_border_tt\" id=\"S3.T2.1.10.8.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.10.8.2.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.10.8.2.1.1\">40.8</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb ltx_border_tt\" id=\"S3.T2.1.10.8.3\">48.5 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_bb ltx_border_tt\" id=\"S3.T2.1.10.8.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.10.8.4.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.10.8.4.1.1\">48.9</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb ltx_border_tt\" id=\"S3.T2.1.10.8.5\">82.4 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td 
class=\"ltx_td ltx_align_justify ltx_border_bb ltx_border_tt\" id=\"S3.T2.1.10.8.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.10.8.6.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.10.8.6.1.1\">86.5</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_bb ltx_border_tt\" id=\"S3.T2.1.10.8.7\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T2.1.10.8.7.1\">\n<span class=\"ltx_p\" id=\"S3.T2.1.10.8.7.1.1\">86.6</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 74 |
+
"capture": "Table 2: Translation quality of the proposed method versus other methods at Beam=5 and K=2."
|
| 75 |
+
},
|
| 76 |
+
"3": {
|
| 77 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T3\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>GPU Inference Results on ZCode M3 Model.</figcaption>\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S3.T3.1\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T3.1.1.1\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_tt ltx_border_t\" id=\"S3.T3.1.1.1.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_tt ltx_border_t\" colspan=\"3\" id=\"S3.T3.1.1.1.2\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.1.1.2.1\">beam=1, batch=1</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_tt ltx_border_t\" colspan=\"3\" id=\"S3.T3.1.1.1.3\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.1.1.3.1\">beam=2, batch=20</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.1.2.2\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify\" id=\"S3.T3.1.2.2.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.2.2.1.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.2.2.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.2.2.1.1.1.1\">Domain</span></span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" colspan=\"2\" id=\"S3.T3.1.2.2.2\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.2.2.2.1\">BLEU</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T3.1.2.2.3\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.2.2.3.1\">Speed Drop (%)</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" colspan=\"2\" id=\"S3.T3.1.2.2.4\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.2.2.4.1\">BLEU</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T3.1.2.2.5\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.2.2.5.1\">Speed Drop (%)</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.1.3.3\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify\" id=\"S3.T3.1.3.3.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.1.3.3.2\">NMT \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify 
ltx_border_t\" id=\"S3.T3.1.3.3.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.3.3.3.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.3.3.3.1.1\">knn-MT</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.1.3.3.4\">\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.1.3.3.5\">NMT \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T3.1.3.3.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.3.3.6.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.3.3.6.1.1\">knn-MT</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.1.3.3.7\">\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.1.4.4\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_t\" id=\"S3.T3.1.4.4.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.4.4.1.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.4.4.1.1.1\">IT</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.1.4.4.2\">37.6 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T3.1.4.4.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.4.4.3.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.4.4.3.1.1\">43.8</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T3.1.4.4.4\">4.9 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.1.4.4.5\">37.4 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T3.1.4.4.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.4.4.6.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.4.4.6.1.1\">43.7</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T3.1.4.4.7\">6.5 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.1.5.5\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_t\" id=\"S3.T3.1.5.5.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.5.5.1.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.5.5.1.1.1\">Medical</span>\n</span>\u00a0\u00a0<span 
class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.1.5.5.2\">45.6 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T3.1.5.5.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.5.5.3.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.5.5.3.1.1\">55.6</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T3.1.5.5.4\">5.0 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.1.5.5.5\">45.8 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T3.1.5.5.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.5.5.6.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.5.5.6.1.1\">56.3</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T3.1.5.5.7\">9.1 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.1.6.6\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_t\" id=\"S3.T3.1.6.6.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.6.6.1.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.6.6.1.1.1\">Law</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.1.6.6.2\">54.1 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T3.1.6.6.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.6.6.3.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.6.6.3.1.1\">61.8</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T3.1.6.6.4\">5.8 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.1.6.6.5\">54.1 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_t\" id=\"S3.T3.1.6.6.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.6.6.6.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.6.6.6.1.1\">62.2</span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T3.1.6.6.7\">5.5 \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" 
id=\"S3.T3.1.7.7\">\n<td class=\"ltx_td ltx_nopad_l ltx_align_justify ltx_border_bb ltx_border_tt\" id=\"S3.T3.1.7.7.1\">\n<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\u00a0\u00a0\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.7.7.1.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.7.7.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.7.7.1.1.1.1\">AVERAGE</span></span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb ltx_border_tt\" id=\"S3.T3.1.7.7.2\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.7.7.2.1\">45.7</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_bb ltx_border_tt\" id=\"S3.T3.1.7.7.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.7.7.3.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.7.7.3.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.7.7.3.1.1.1\">53.7</span></span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb ltx_border_tt\" id=\"S3.T3.1.7.7.4\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.7.7.4.1\">5.2</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb ltx_border_tt\" id=\"S3.T3.1.7.7.5\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.7.7.5.1\">45.7</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_bb ltx_border_tt\" id=\"S3.T3.1.7.7.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T3.1.7.7.6.1\">\n<span class=\"ltx_p\" id=\"S3.T3.1.7.7.6.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.7.7.6.1.1.1\">54.0</span></span>\n</span>\u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb ltx_border_tt\" id=\"S3.T3.1.7.7.7\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S3.T3.1.7.7.7.1\">7.0</span> \u00a0\u00a0<span class=\"ltx_rule\" style=\"width:1.5pt;background:black;display:inline-block;\">\u00a0</span>\n</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 78 |
+
"capture": "Table 3: GPU Inference Results on ZCode M3 Model."
|
| 79 |
+
}
|
| 80 |
+
},
|
| 81 |
+
"image_paths": {},
|
| 82 |
+
"validation": true,
|
| 83 |
+
"references": [
|
| 84 |
+
{
|
| 85 |
+
"1": {
|
| 86 |
+
"title": "Simple and scalable nearest neighbor machine translation.",
|
| 87 |
+
"author": "Dai, Y., Zhang, Z., Liu, Q., Cui, Q., Li, W., Du, Y., and Xu, T. (2023).",
|
| 88 |
+
"venue": "In The Eleventh International Conference on Learning Representations (ICLR).",
|
| 89 |
+
"url": null
|
| 90 |
+
}
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"2": {
|
| 94 |
+
"title": "Towards robust k-nearest-neighbor machine translation.",
|
| 95 |
+
"author": "Jiang, H., Lu, Z., Meng, F., Zhou, C., Zhou, J., Huang, D., and Su, J. (2022).",
|
| 96 |
+
"venue": "In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 5468\u20135477, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.",
|
| 97 |
+
"url": null
|
| 98 |
+
}
|
| 99 |
+
},
|
| 100 |
+
{
|
| 101 |
+
"3": {
|
| 102 |
+
"title": "Learning kernel-smoothed machine translation with retrieved examples.",
|
| 103 |
+
"author": "Jiang, Q., Wang, M., Cao, J., Cheng, S., Huang, S., and Li, L. (2021).",
|
| 104 |
+
"venue": "In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7280\u20137290, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.",
|
| 105 |
+
"url": null
|
| 106 |
+
}
|
| 107 |
+
},
|
| 108 |
+
{
|
| 109 |
+
"4": {
|
| 110 |
+
"title": "Nearest neighbor machine translation.",
|
| 111 |
+
"author": "Khandelwal, U., Fan, A., Jurafsky, D., Zettlemoyer, L., and Lewis, M. (2021).",
|
| 112 |
+
"venue": "In International Conference on Learning Representations (ICLR).",
|
| 113 |
+
"url": null
|
| 114 |
+
}
|
| 115 |
+
},
|
| 116 |
+
{
|
| 117 |
+
"5": {
|
| 118 |
+
"title": "Scalable and efficient moe training for multitask multilingual models.",
|
| 119 |
+
"author": "Kim, Y. J., Awan, A. A., Muzio, A., Salinas, A. F. C., Lu, L., Hendy, A., Rajbhandari, S., He, Y., and Awadalla, H. H. (2021).",
|
| 120 |
+
"venue": "arXiv preprint arXiv:2109.10465.",
|
| 121 |
+
"url": null
|
| 122 |
+
}
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"6": {
|
| 126 |
+
"title": "Fast nearest neighbor machine translation.",
|
| 127 |
+
"author": "Meng, Y., Li, X., Zheng, X., Wu, F., Sun, X., Zhang, T., and Li, J. (2022).",
|
| 128 |
+
"venue": "In Findings of the Association for Computational Linguistics: ACL 2022, pages 555\u2013565, Dublin, Ireland. Association for Computational Linguistics.",
|
| 129 |
+
"url": null
|
| 130 |
+
}
|
| 131 |
+
},
|
| 132 |
+
{
|
| 133 |
+
"7": {
|
| 134 |
+
"title": "A call for clarity in reporting BLEU scores.",
|
| 135 |
+
"author": "Post, M. (2018).",
|
| 136 |
+
"venue": "In Bojar, O., Chatterjee, R., Federmann, C., Fishel, M., Graham, Y., Haddow, B., Huck, M., Yepes, A. J., Koehn, P., Monz, C., Negri, M., N\u00e9v\u00e9ol, A., Neves, M., Post, M., Specia, L., Turchi, M., and Verspoor, K., editors, Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186\u2013191, Brussels, Belgium. Association for Computational Linguistics.",
|
| 137 |
+
"url": null
|
| 138 |
+
}
|
| 139 |
+
},
|
| 140 |
+
{
|
| 141 |
+
"8": {
|
| 142 |
+
"title": "Comet-22: Unbabel-ist 2022 submission for the metrics shared task.",
|
| 143 |
+
"author": "Rei, R., De Souza, J. G., Alves, D., Zerva, C., Farinha, A. C., Glushkova, T., Lavie, A., Coheur, L., and Martins, A. F. (2022).",
|
| 144 |
+
"venue": "In Proceedings of the Seventh Conference on Machine Translation (WMT), pages 578\u2013585.",
|
| 145 |
+
"url": null
|
| 146 |
+
}
|
| 147 |
+
},
|
| 148 |
+
{
|
| 149 |
+
"9": {
|
| 150 |
+
"title": "The probabilistic relevance framework: Bm25 and beyond.",
|
| 151 |
+
"author": "Robertson, S., Zaragoza, H., et al. (2009).",
|
| 152 |
+
"venue": "Foundations and Trends\u00ae in Information Retrieval, 3(4):333\u2013389.",
|
| 153 |
+
"url": null
|
| 154 |
+
}
|
| 155 |
+
},
|
| 156 |
+
{
|
| 157 |
+
"10": {
|
| 158 |
+
"title": "Attention is all you need.",
|
| 159 |
+
"author": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. (2017).",
|
| 160 |
+
"venue": null,
|
| 161 |
+
"url": null
|
| 162 |
+
}
|
| 163 |
+
},
|
| 164 |
+
{
|
| 165 |
+
"11": {
|
| 166 |
+
"title": "Efficient cluster-based -nearest-neighbor machine translation.",
|
| 167 |
+
"author": "Wang, D., Fan, K., Chen, B., and Xiong, D. (2022).",
|
| 168 |
+
"venue": "In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2175\u20132187, Dublin, Ireland. Association for Computational Linguistics.",
|
| 169 |
+
"url": null
|
| 170 |
+
}
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"12": {
|
| 174 |
+
"title": "Adaptive nearest neighbor machine translation.",
|
| 175 |
+
"author": "Zheng, X., Zhang, Z., Guo, J., Huang, S., Chen, B., Luo, W., and Chen, J. (2021a).",
|
| 176 |
+
"venue": "In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 368\u2013374, Online. Association for Computational Linguistics.",
|
| 177 |
+
"url": null
|
| 178 |
+
}
|
| 179 |
+
},
|
| 180 |
+
{
|
| 181 |
+
"13": {
|
| 182 |
+
"title": "Non-parametric unsupervised domain adaptation for neural machine translation.",
|
| 183 |
+
"author": "Zheng, X., Zhang, Z., Huang, S., Chen, B., Xie, J., Luo, W., and Chen, J. (2021b).",
|
| 184 |
+
"venue": "In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 4234\u20134241, Punta Cana, Dominican Republic. Association for Computational Linguistics.",
|
| 185 |
+
"url": null
|
| 186 |
+
}
|
| 187 |
+
}
|
| 188 |
+
],
|
| 189 |
+
"url": "http://arxiv.org/html/2407.19965v2"
|
| 190 |
+
}
|
20240819/2408.05884v2.json
ADDED
|
@@ -0,0 +1,101 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Wireless MAC Protocol Synthesis and Optimization with Multi-Agent Distributed Reinforcement Learning",
|
| 3 |
+
"abstract": "In this letter, we propose a novel Multi-Agent Deep Reinforcement Learning (MADRL) framework for Medium Access Control (MAC) protocol design. Unlike centralized approaches, which rely on a single entity for decision-making, MADRL empowers individual network nodes to autonomously learn and optimize their MAC based on local observations. Leveraging ns3-ai and RLlib, as far as we are aware of, our framework is the first of a kind that enables distributed multi-agent learning within the ns-3 environment, facilitating the design and synthesis of adaptive MAC protocols tailored to specific environmental conditions. We demonstrate the effectiveness of the MADRL MAC framework through extensive simulations, showcasing superior performance compared to legacy protocols across diverse scenarios. Our findings highlight the potential of MADRL-based MAC protocols to significantly enhance Quality of Service (QoS) requirements for future wireless applications.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Wireless networks are continuously faced with a multitude of demands, ranging from high-reliability and low-latency connectivity to support of bandwidth-intensive applications such as virtual reality (VR), gaming, and holographic video. These various applications highlight the growing need for adaptable and application-specific channel access mechanisms and resource allocation. However, the large number of configurable parameters and their entangled inter-dependencies in the medium access control (MAC) layer, for example, pose challenges to optimize and fine-tune protocols with traditional methods due to the dynamic nature of wireless networks [1 ###reference_b1###], especially in uncoordinated environments such as unlicensed bands lacking a centralized authority to regulate channel access across all network nodes.\nIn recent years, AI-driven approaches, particularly deep reinforcement learning (DRL), have shown great promise in optimizing wireless network performance [2 ###reference_b2###] by letting MAC protocols to learn and adapt autonomously based on real-time feedback from the environment, and ensuring more intelligent and adaptive behavior. However, most of the existing works in this area optimize and configure only a few parameters in the MAC layer or physical layer[3 ###reference_b3###]. Some recent studies also explore applications of DRL for generating protocols and signalling for cellular [4 ###reference_b4###, 5 ###reference_b5###] and Wi-Fi networks [6 ###reference_b6###, 7 ###reference_b7###]. While these studies make significant step forward, the proposed solutions rely on centralized entities for training. Even in instances where distributed inference is employed, the overarching approach remains centralized, hampering scalability and adaptability, particularly in dynamic network environments where decentralized decision-making is desirable.\nExpanding upon our prior work, [6 ###reference_b6###, 8 ###reference_b8###], which proposed a DRL-based MAC design framework with centralized learning and execution mechanisms for Wi-Fi networks, in this letter, we introduce a Multi-Agent Deep Reinforcement Learning\n(MADRL) framework that incorporates both centralized and distributed learning, along with distributed inference. This advancement increases flexibility and adaptability, empowering individual nodes to manage diverse traffic loads and environmental conditions effectively. We implement our MADRL framework within ns-3 by integrating ns3-ai[9 ###reference_b9###] and RLlib. To the best of our knowledge, this is the first implementation that enables distributed multi-agent learning using ns3-ai and Ray RLlib in ns-3 environment. Furthermore, we extend the capabilities of our framework to support 5G New Radio Unlicensed (5G NR-U) technology, which demonstrates the versatility of our solution across emerging wireless technologies beyond Wi-Fi networks. We test and verify the performance of our framework and the learnt protocols through extensive system-level\nsimulations and demonstrate the superior performance of the MADRL-synthesized protocols over legacy protocols across diverse scenarios. This underlines the potential of our framework to significantly\nenhance QoS for future applications through a novel protocol design approach."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "II System Concept",
|
| 15 |
+
"text": "Figure 1 ###reference_### illustrates our system concept.\nThe synthesis of MAC protocols is performed from a set of atomic building blocks interconnected through Machine Learning (ML) driven policies. To drive this process, we supply the ML framework with atomic building block functions such as backoff, sensing, defer, modulation and coding scheme and all of their related parameters along with application requirements and environment characteristics such as type of traffic and packet arrival rates, and number of nodes. The ML framework synthesizes a new protocol, which is subsequently evaluated in the ns-3 simulator using a pre-defined reward, which is selected to fit the application requirements. Upon computation of the reward, feedback loops back to the ML framework. This iterative process continues until a protocol is constructed that incorporates the optimal set of building blocks for the current network/environment configuration. This modular approach empowers the agents to discern the most effective combination of these blocks, thereby generating novel MAC protocols or refining existing ones.\n###figure_1### In this letter, we showcase the MAC protocols synthesis with our MADRL framework in a network comprising of 5G New Radio-Unlicensed (5G NR-U) gNBs.\n5G NR-U is a radio access technology that is being developed by 3GPP and first introduced in Release 16[10 ###reference_b10###].\nOne of the key features is the channel access mechanism, namely Listen Before Talk (LBT), inherited from the LTE licensed-assisted access (LTE-LAA), operating in 5 GHz unlicensed band. Before transmission, 5G NR-U devices have to sense the channel to ensure harmonious coexistence with other unlicensed devices, such as IEEE 802.11ax.\n5G NR-U devices perform LBT procedure in the downlink, similar to Wi-Fi\u2019s CSMA/CA protocol. As shown in Figure 2 ###reference_### after a period of idle channel, , lasting , the gNB initiates Clear Channel Assessment (CCA) in a sequence of consecutive observation CCA slots, each with a duration of . Subsequently, the deferred period is computed as + . Where is determined based on the priority assigned to various traffic types in the standard. If the channel remains idle throughout the defer time, the gNB initiates the backoff procedure by randomly selecting a number from the set while continuing to sense the channel and decrementing the backoff counter. Upon reaching zero, the gNB starts transmission. If the channel becomes occupied during any of these slots, the backoff counter freezes, and the process restarts once the channel becomes idle again with the remaining backoff counter from the previous attempt. Upon gaining access to the channel, the gNBs can occupy it for a maximum duration, known as Maximum Channel Occupancy Time (MCOT). MCOT has different values for different traffic types as defined in the standard specification[10 ###reference_b10###].\n###figure_2###"
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "III Multi-Agent Deep Reinforcement Learning for Reconfigurable MAC protocol",
|
| 21 |
+
"text": "Our goal is to develop a multi-agent learning algorithm for reconfigurable MAC that can effectively adjust according to real wireless network scenarios while overcoming the complexity due to large parameter spaces and partial observability of the environment. First, we define the agent\u2019s decision process as a Partially Observable Markov Decision Process (POMDP), consisting of observations, actions, and rewards. Thereafter, we leverage the proximal policy optimization (PPO) algorithm to train the agents efficiently in a distributed manner."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "III-A Problem Formulation",
|
| 27 |
+
"text": "Our ultimate objective is to maximize the long-term throughput averaged over all gNBs and episode time steps, as follows:\nwhere is the aggregated downlink throughput per gNB.\nWe consider a network environment where multiple gNBs are deployed, each serving a particular area with a diverse set of traffic types, including Poisson traffic with different arrival rates, and augmented and/or virtual reality (AR/VR) traffic, modeled as bursty traffic with different frame rates [11 ###reference_b11###]. We assume a partial observability of the environment at each gNB. Each node has complete autonomy in creating the MAC protocol and adjusting the protocol parameters. This means each agent has the capability to manipulate the deferred period () by modifying the size and number of clear channel assessment slots or the defer time , as well as adjusting the backoff number, size, and its functions. This allows each agent to create various types of MAC protocols. Moreover, agents can control parameters such as the energy detection threshold () and the transmission power ()."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "III-B Partially Observable Markov Decision Process (POMDP)",
|
| 33 |
+
"text": "Observation space : Observation () = , , , , , , , The observation space of agent is defined as tuple, which includes the current MAC protocol blocks specified by the , the number of visible nodes in the surrounding area , which depends on the energy detection threshold, the interference from other nodes (), and the received power level from the connected user (). The agent can also calculate the throughput, and the delay . The parameter is the airtime occupied by other users on the channel, which is obtained through the sensing capability of the agent. We assume that each node broadcasts its traffic characteristics and the aggregated downlink throughput. Broadcasting can be done using the X2 interface defined in NR protocols for communication between neighbour nodes.\nAction :\nAction () = , , , , , , , \nThe action space for each agent is defined as a tuple containing MAC block functions and their parameters that determine the behaviour of the MAC protocol. These parameters include the backoff function type and its relevant parameters, such as the sensing slot duration , the minimum contention window size , the energy detection threshold and the defer time . Additionally, the tuple specifies the modulation and coding scheme , the maximum channel occupancy time , and the transmission power . Each agent makes decisions on whether to include specific MAC protocol blocks and choose appropriate values for parameters. Table I ###reference_### provides a summary of the action space parameters and their corresponding values. Each parameter in the action space has a range of possible values, allowing agents to make diverse decisions when configuring the MAC protocol.\n###figure_3### Reward :\nEach agent broadcasts its throughput and traffic rate to the nodes within its range. Each node can also calculate the airtime of other nodes within its range. We define the reward for each agent as follows:\nwhere represents the mean normalized aggregated downlink throughput of network and is the normalized traffic arrival rate and denotes the normalized airtime of gNB. Both and are normalized with respect to other nodes within their sensing range. The reward encourages effective usage of the channel by minimizing airtime while maximizing throughput. Additionally, the reward function discourages greediness among agents by considering the throughput and airtime of other nodes within range.\nInput:\nAll training parameters from Table II ###reference_###\n\n Output:\n, where"
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.3",
|
| 37 |
+
"parent_section_id": "3",
|
| 38 |
+
"section_name": "III-C PPO for multi-agent reconfigurable wireless MAC protocol",
|
| 39 |
+
"text": "We use Proximal Policy Optimization (PPO) for designing MAC policies across different deployment scenarios [12 ###reference_b12###], developed by OpenAI. PPO is an actor-critic algorithm, meaning it employs two separate neural networks, for value and policy estimation. We adopt a fully distributed approach for both learning and execution. Each gNB node hosts a single agent dedicated to training and inference tasks as illustrated in Figure 3 ###reference_###. Each agent operates autonomously with its own dedicated neural networks, ensuring complete autonomy and decentralization.\nAs shown in Algorithm 1, every agent initializes the value networks and policy networks with the respective parameters and and maintains separate mini-batches. For each iteration, the policy at each node is executed in the environment independently, and each agent accumulates its experiences according to the PPO algorithm (lines 5-6), facilitating individualized learning and adaptation.\nSubsequently, the advantage function is computed for each time step at each node. The advantage function measures the potential benefit of choosing a particular action in a certain state compared to the average outcome expected when following the current policy, and is defined as follows:\nwhere the first term, discounted returns , is calculated using the collected rewards, and is the value estimate for each observation from the value network.\nTo optimize the policy and value networks, we randomly collect samples and add them to mini-batches. The value network is optimized by minimizing the value loss, which is defined as the mean squared error between the predicted values and the computed target values.\nFollowing this, we proceed to update the parameters, , of the value network. This is accomplished by minimizing the value loss by using gradient descent.\nConcurrently, the optimization process in PPO involves updating the policy network by maximizing a clipped surrogate objective, which is given by This loss function uses the ratio of the new policy to the old policy, which is computed as follows:\nAdditionally, a clipping function is used to ensure that updates are not extreme, as significant deviations could destabilize learning progress. Here, represents the clipping parameter.\nThe entropy term, denoted by , encourages exploration by the policy and prevents premature settling on suboptimal deterministic policies. The entropy term is defined as:\nwhere parameter acts as a coefficient that controls the weight of the entropy term.\nUltimately, the policy network parameters are updated by maximizing the objective using a gradient ascent.\n###figure_4### During the training phase, the broadcasting of rewards and traffic characteristics can be accomplished using the X2 interface defined in NR protocols for communication between neighbouring nodes or added to the packet header."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "IV Simulation and Learning Environment",
|
| 45 |
+
"text": "We implement our MADRL framework by integrating ns3-ai and RLlib. To ensure compatibility between ns3-ai and RLlib, we have created a dummy environment within RLlib, which collects the data from the ns3-ai gym environment. Our primary simulation is the ns-3, with ns3-ai responsible for transferring actions and observations between ns-3 and RLlib\u2019s dummy environment, as illustrated in Figure 4 ###reference_###. Observations collected from the ns-3 environment are relayed to the dummy environment, where agents analyze them to determine suitable actions. These actions are then directly applied to the agent within the ns-3 simulation environment, enabling uninterrupted simulation. We use the 5G NR-U module, which is a full-stack implementation of NR, including the channel access mechanism specified for NR-U technology. We have ensured its full functionality and compatibility with ns3-ai.\nThe simulation and training processes were conducted on a server equipped with 2 GPU units and 64 CPU cores.\nThe ns-3 simulations ran on the CPU, while the training process, involving machine learning algorithms and neural network models, ran simultaneously on the GPU, for a faster convergence of the overall learning process.\nFigure 5 ###reference_### illustrates the learning convergence of the proposed distributed training and execution approach, DTDE, against the centralized training and execution approach, CTCE introduced in [6 ###reference_b6###]. In CTCE, a single agent is responsible for both training and execution. The distributed approach converges notably faster than the centralized approach, which can be attributed to the CTCE\u2019s requirement for the agent to manage a significantly larger action space, thereby slowing down the learning process. It is also worth noting that due to the distributed nature of the system and the lack of full control and knowledge over other nodes, DTDE achieves a slightly lower mean reward compared to centralized learning.\n###figure_5###"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "5",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Performance Evaluation",
|
| 51 |
+
"text": "For the evaluation of our distributed multi-agent approach, i.e., DTDE, we randomly deployed six 5G NR-U gNBs in an area of . Each gNB is connected to at least one UE. At each gNB a single agent is deployed that learns the optimal MAC blocks and their parameters based on the current environment and application requirements. We consider Poison and AR/VR traffic, which has bursty characteristics [11 ###reference_b11###]. We evaluate the performance of the synthesized MAC protocol in terms of mean downlink throughput and average end-to-end packet delay per gNB.\nFigure LABEL:fig:results shows the evaluation results for various traffic densities and types. We consider four traffic densities based on packet arrival rates , i.e., low traffic (10 to 500 packets/sec), medium traffic (500 to 1000 packets/sec), high traffic (1000 to 3000 packets/sec), and random-rate traffic (10 to 3000 packets/sec). We ran our distributed approach for 10 episodes, with each agent using its learning model. Afterwards, we compared its performance with that of the standard-based 5G NR-U and the centralized approach, i.e., CTCE. The system parameters of 5G NR-U are listed in Table I ###reference_###.\nFigures LABEL:TSUS_1_1 and LABEL:TSUS_2_1 illustrate the distribution of mean throughput and delay across all nodes in the environment for low-density Poisson and AR/VR traffic. As the contention for channel access is minimal and all the packets in the queue can be successfully transmitted, both baselines (the standard 5G NR-U and CTCE), and our distributed multi-agent NR-U protocol show similar throughput. However, in terms of delay, both learning approaches show improvement due to the selection of appropriate MAC blocks and parameters and removing the unnecessary overhead in the standards 5G NR-U.\nFigures LABEL:TSUS_1_2, LABEL:TSUS_1_3, and LABEL:TSUS_1_4 display the throughput distributions for medium, high, and mixed-rate traffic. The results demonstrate that our distributed approach improves mean throughput by at least , primarily due to the reduction of carrier sensing overhead and the dynamic selection of MAC protocol blocks tailored to each node\u2019s specific requirements. This is achieved by selecting the appropriate backoff algorithm, deferring, and sensing parameters at each node based on the environmental characteristics observed by each agent.\nIt is also worth noting that some nodes achieved significantly higher throughput without adversely affecting others, as indicated by the red outlier points. This improvement is due to our framework\u2019s ability to select not only the optimal MAC protocols for each scenario but also to adjust transmission power levels to minimize interference with other coexisting nodes. Additionally, nodes adjust their sensitivity to interference from neighbouring nodes by changing , which allows nodes to access the channel more freely, similar to the Basic Service Set (BSS) coloring technique used in Wi-Fi technology.\nAs a result, nodes gain more opportunities to transmit, leading to a significant reduction in end-to-end packet delay, as shown in Figures LABEL:TSUS_2_2, LABEL:TSUS_2_3, and LABEL:TSUS_2_4.\nOverall, our distributed multi-agent NR-U protocol consistently surpasses the standard 5G NR-U protocol and closely matches the performance of the centralized baseline, despite each agent having only partial observation compared to the centralized model, which possesses complete knowledge. 
This success is largely attributed to our reward function, which ensures that each agent at each gNB considers not only its own performance but also that of neighboring nodes within its sensing range."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "6",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "VI Conclusions",
|
| 57 |
+
"text": "In this letter, we have proposed a MADRL framework that leverages distributed multi-agent machine learning to empower individual network nodes to autonomously optimize design and configure MAC protocols, thus overcoming the limitations of centralized decision-making. By enabling nodes to customize their Medium access based on local observations, our approach offers adaptability and scalability tailored to specific environmental conditions. Through extensive simulations, we have demonstrated the superiority of MADRL-synthesized protocols over the legacy 5G NR-U MAC, highlighting the potential of the new protocol design approach to enhance QoS for future wireless applications."
|
| 58 |
+
}
|
| 59 |
+
],
|
| 60 |
+
"appendix": [],
|
| 61 |
+
"tables": {
|
| 62 |
+
"1": {
|
| 63 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T1\">\n<figcaption class=\"ltx_caption\" style=\"font-size:90%;\"><span class=\"ltx_tag ltx_tag_table\">Table I: </span>The Action Space</figcaption>\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S3.T1.11\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T1.11.12.1\">\n<th class=\"ltx_td ltx_align_top ltx_th ltx_th_column ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.11.12.1.1\"></th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.11.12.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.11.12.1.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.11.12.1.2.1.1\" style=\"width:68.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S3.T1.11.12.1.2.1.1.1\" style=\"font-size:90%;\">Action parameter</span></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.11.12.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.11.12.1.3.1\">\n<span class=\"ltx_p\" id=\"S3.T1.11.12.1.3.1.1\" style=\"width:67.1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S3.T1.11.12.1.3.1.1.1\" style=\"font-size:90%;\">Values Range</span></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.11.12.1.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.11.12.1.4.1\">\n<span class=\"ltx_p\" id=\"S3.T1.11.12.1.4.1.1\" style=\"width:68.6pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S3.T1.11.12.1.4.1.1.1\" style=\"font-size:90%;\">Standard value</span></span>\n</span>\n</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.1\">\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.1.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.1.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.1.1.1.1\" style=\"width:2.8pt;\"></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.1.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.1.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.1.2.1.1\" style=\"width:68.3pt;\"><span class=\"ltx_text\" id=\"S3.T1.1.1.2.1.1.1\" style=\"font-size:90%;\">Sensing Slot Size</span></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.1.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.1.3.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.1.3.1.1\" style=\"width:67.1pt;\"><span class=\"ltx_text\" id=\"S3.T1.1.1.3.1.1.1\" style=\"font-size:90%;\">{0, 1, 2, \u2026, 20}</span></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.1.1.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.1.1.4.1\">\n<span class=\"ltx_p\" id=\"S3.T1.1.1.4.1.1\" style=\"width:68.6pt;\"><span class=\"ltx_text\" id=\"S3.T1.1.1.4.1.1.1\" style=\"font-size:90%;\">9</span></span>\n</span>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T1.2.2\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.2.2.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.2.2.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.2.2.1.1.1\" 
style=\"width:2.8pt;\"></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.2.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.2.2.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.2.2.2.1.1\" style=\"width:68.3pt;\"><span class=\"ltx_text\" id=\"S3.T1.2.2.2.1.1.1\" style=\"font-size:90%;\">Backoff type</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.2.2.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.2.2.3.1\">\n<span class=\"ltx_p\" id=\"S3.T1.2.2.3.1.1\" style=\"width:67.1pt;\"><span class=\"ltx_text\" id=\"S3.T1.2.2.3.1.1.1\" style=\"font-size:90%;\">Off, EDID,</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.2.2.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.2.2.4.1\">\n<span class=\"ltx_p\" id=\"S3.T1.2.2.4.1.1\" style=\"width:68.6pt;\"><span class=\"ltx_text\" id=\"S3.T1.2.2.4.1.1.1\" style=\"font-size:90%;\">BEB</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.11.13.1\">\n<td class=\"ltx_td ltx_align_top ltx_border_l ltx_border_r\" id=\"S3.T1.11.13.1.1\"></td>\n<td class=\"ltx_td ltx_align_top ltx_border_r\" id=\"S3.T1.11.13.1.2\"></td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r\" id=\"S3.T1.11.13.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.11.13.1.3.1\">\n<span class=\"ltx_p\" id=\"S3.T1.11.13.1.3.1.1\" style=\"width:67.1pt;\"><span class=\"ltx_text\" id=\"S3.T1.11.13.1.3.1.1.1\" style=\"font-size:90%;\">BEB, Constant</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_top ltx_border_r\" id=\"S3.T1.11.13.1.4\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.3.3\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.3.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.3.3.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.3.3.1.1.1\" style=\"width:2.8pt;\"></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.3.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.3.3.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.3.3.2.1.1\" style=\"width:68.3pt;\"><span class=\"ltx_text\" id=\"S3.T1.3.3.2.1.1.1\" style=\"font-size:90%;\">Minimum CW</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.3.3.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.3.3.3.1\">\n<span class=\"ltx_p\" id=\"S3.T1.3.3.3.1.1\" style=\"width:67.1pt;\"><span class=\"ltx_text\" id=\"S3.T1.3.3.3.1.1.1\" style=\"font-size:90%;\">{0, 1, 2, \u2026, 63}</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.3.3.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.3.3.4.1\">\n<span class=\"ltx_p\" id=\"S3.T1.3.3.4.1.1\" style=\"width:68.6pt;\"><span class=\"ltx_text\" id=\"S3.T1.3.3.4.1.1.1\" style=\"font-size:90%;\">15</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.4.4\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.4.4.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.4.4.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.4.4.1.1.1\" style=\"width:2.8pt;\"></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r 
ltx_border_t\" id=\"S3.T1.4.4.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.4.4.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.4.4.2.1.1\" style=\"width:68.3pt;\"><span class=\"ltx_text\" id=\"S3.T1.4.4.2.1.1.1\" style=\"font-size:90%;\">MCOT</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.4.4.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.4.4.3.1\">\n<span class=\"ltx_p\" id=\"S3.T1.4.4.3.1.1\" style=\"width:67.1pt;\"><span class=\"ltx_text\" id=\"S3.T1.4.4.3.1.1.1\" style=\"font-size:90%;\">{0, 1, 2, \u2026, 10}</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.4.4.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.4.4.4.1\">\n<span class=\"ltx_p\" id=\"S3.T1.4.4.4.1.1\" style=\"width:68.6pt;\"><span class=\"ltx_text\" id=\"S3.T1.4.4.4.1.1.1\" style=\"font-size:90%;\">2, 3, 5, 8</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.5.5\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.5.5.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.5.5.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.5.5.1.1.1\" style=\"width:2.8pt;\"></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.5.5.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.5.5.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.5.5.2.1.1\" style=\"width:68.3pt;\"><span class=\"ltx_text\" id=\"S3.T1.5.5.2.1.1.1\" style=\"font-size:90%;\">MCS</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.5.5.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.5.5.3.1\">\n<span class=\"ltx_p\" id=\"S3.T1.5.5.3.1.1\" style=\"width:67.1pt;\"><span class=\"ltx_text\" id=\"S3.T1.5.5.3.1.1.1\" style=\"font-size:90%;\">{0, 1, 2, \u2026, 28}</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.5.5.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.5.5.4.1\">\n<span class=\"ltx_p\" id=\"S3.T1.5.5.4.1.1\" style=\"width:68.6pt;\"><span class=\"ltx_text\" id=\"S3.T1.5.5.4.1.1.1\" style=\"font-size:90%;\">Auto. 
Rate Control</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.7.7\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.6.6.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.6.6.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.6.6.1.1.1\" style=\"width:2.8pt;\"></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.7.7.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.7.7.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.7.7.2.1.1\" style=\"width:68.3pt;\"></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.7.7.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.7.7.3.1\">\n<span class=\"ltx_p\" id=\"S3.T1.7.7.3.1.1\" style=\"width:67.1pt;\"><span class=\"ltx_text\" id=\"S3.T1.7.7.3.1.1.1\" style=\"font-size:90%;\">{0, 1, 2, \u2026, 20}</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.7.7.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.7.7.4.1\">\n<span class=\"ltx_p\" id=\"S3.T1.7.7.4.1.1\" style=\"width:68.6pt;\"><span class=\"ltx_text\" id=\"S3.T1.7.7.4.1.1.1\" style=\"font-size:90%;\">16</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.9.9\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.8.8.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.8.8.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.8.8.1.1.1\" style=\"width:2.8pt;\"></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.9.9.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.9.9.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.9.9.2.1.1\" style=\"width:68.3pt;\"><span class=\"ltx_text\" id=\"S3.T1.9.9.2.1.1.1\" style=\"font-size:90%;\"> [dBm]</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.9.9.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.9.9.3.1\">\n<span class=\"ltx_p\" id=\"S3.T1.9.9.3.1.1\" style=\"width:67.1pt;\"><span class=\"ltx_text\" id=\"S3.T1.9.9.3.1.1.1\" style=\"font-size:90%;\">{-90, -89, \u2026, -60}</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S3.T1.9.9.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.9.9.4.1\">\n<span class=\"ltx_p\" id=\"S3.T1.9.9.4.1.1\" style=\"width:68.6pt;\"><span class=\"ltx_text\" id=\"S3.T1.9.9.4.1.1.1\" style=\"font-size:90%;\">-62 dBm</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.11.11\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S3.T1.10.10.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.10.10.1.1\">\n<span class=\"ltx_p\" id=\"S3.T1.10.10.1.1.1\" style=\"width:2.8pt;\"></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T1.11.11.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.11.11.2.1\">\n<span class=\"ltx_p\" id=\"S3.T1.11.11.2.1.1\" style=\"width:68.3pt;\"><span class=\"ltx_text\" id=\"S3.T1.11.11.2.1.1.1\" style=\"font-size:90%;\"> [dBm]</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_r ltx_border_t\" 
id=\"S3.T1.11.11.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.11.11.3.1\">\n<span class=\"ltx_p\" id=\"S3.T1.11.11.3.1.1\" style=\"width:67.1pt;\"><span class=\"ltx_text\" id=\"S3.T1.11.11.3.1.1.1\" style=\"font-size:90%;\">{10, 11, \u2026, 30}</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T1.11.11.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S3.T1.11.11.4.1\">\n<span class=\"ltx_p\" id=\"S3.T1.11.11.4.1.1\" style=\"width:68.6pt;\"><span class=\"ltx_text\" id=\"S3.T1.11.11.4.1.1.1\" style=\"font-size:90%;\">23 dBm</span></span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 64 |
+
"capture": "Table I: The Action Space"
|
| 65 |
+
},
|
| 66 |
+
"2": {
|
| 67 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<figcaption class=\"ltx_caption ltx_centering\" style=\"font-size:90%;\"><span class=\"ltx_tag ltx_tag_table\">Table II: </span>Training and Environment Parameters</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T2.5\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T2.5.6.1\">\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.5.6.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.6.1.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.6.1.1.1.1\" style=\"width:139.4pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.6.1.1.1.1.1\" style=\"font-size:90%;\">Number of networks (NN)</span></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T2.5.6.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.6.1.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.6.1.2.1.1\" style=\"width:82.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.6.1.2.1.1.1\" style=\"font-size:90%;\">1-6</span></span>\n</span>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T2.5.7.1\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.5.7.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.7.1.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.7.1.1.1.1\" style=\"width:139.4pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.7.1.1.1.1.1\" style=\"font-size:90%;\">Frequency</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T2.5.7.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.7.1.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.7.1.2.1.1\" style=\"width:82.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.7.1.2.1.1.1\" style=\"font-size:90%;\">6 GHz</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.5.8.2\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.5.8.2.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.8.2.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.8.2.1.1.1\" style=\"width:139.4pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.8.2.1.1.1.1\" style=\"font-size:90%;\">Bandwidth</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T2.5.8.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.8.2.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.8.2.2.1.1\" style=\"width:82.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.8.2.2.1.1.1\" style=\"font-size:90%;\">20 MHz</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.2.2\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.1.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.1.1.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.1.1.1.1.1\" style=\"width:139.4pt;\"><span class=\"ltx_text\" id=\"S4.T2.1.1.1.1.1.1\" style=\"font-size:90%;\">Traffic characteristic (TR): Poisson and AR/VR with arrival rates </span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T2.2.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.2.2.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.2.2.2.1.1\" style=\"width:82.5pt;\"><span class=\"ltx_text\" 
id=\"S4.T2.2.2.2.1.1.1\" style=\"font-size:90%;\">[\u00a00 - 3000]</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.3\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.3.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.3.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.3.2.1.1\" style=\"width:139.4pt;\"><span class=\"ltx_text\" id=\"S4.T2.3.3.2.1.1.1\" style=\"font-size:90%;\">Packet size</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T2.3.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.3.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.3.1.1.1\" style=\"width:82.5pt;\"></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.5.9.3\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.5.9.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.9.3.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.9.3.1.1.1\" style=\"width:139.4pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.9.3.1.1.1.1\" style=\"font-size:90%;\">Learning Rate, Optimizer</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T2.5.9.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.9.3.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.9.3.2.1.1\" style=\"width:82.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.9.3.2.1.1.1\" style=\"font-size:90%;\">0.001, Adam</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.5.10.4\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.5.10.4.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.10.4.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.10.4.1.1.1\" style=\"width:139.4pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.10.4.1.1.1.1\" style=\"font-size:90%;\">Policy</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T2.5.10.4.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.10.4.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.10.4.2.1.1\" style=\"width:82.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.10.4.2.1.1.1\" style=\"font-size:90%;\">RNN (2 layers of 256)</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.4\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.4.4.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.4.4.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.4.4.1.1.1\" style=\"width:139.4pt;\"><span class=\"ltx_text\" id=\"S4.T2.4.4.1.1.1.1\" style=\"font-size:90%;\">batch size, </span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T2.4.4.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.4.4.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.4.4.2.1.1\" style=\"width:82.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.4.4.2.1.1.1\" style=\"font-size:90%;\">1000</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.5.11.5\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.5.11.5.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.11.5.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.11.5.1.1.1\" style=\"width:139.4pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.11.5.1.1.1.1\" 
style=\"font-size:90%;\">Step size</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T2.5.11.5.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.11.5.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.11.5.2.1.1\" style=\"width:82.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.11.5.2.1.1.1\" style=\"font-size:90%;\">0.1 s</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.5.12.6\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.5.12.6.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.12.6.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.12.6.1.1.1\" style=\"width:139.4pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.12.6.1.1.1.1\" style=\"font-size:90%;\">Episode duration</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T2.5.12.6.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.12.6.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.12.6.2.1.1\" style=\"width:82.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.12.6.2.1.1.1\" style=\"font-size:90%;\">50 s</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.5.5\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T2.5.5.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.5.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.5.1.1.1\" style=\"width:139.4pt;\"></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T2.5.5.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.5.5.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.5.5.2.1.1\" style=\"width:82.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.5.5.2.1.1.1\" style=\"font-size:90%;\">0.3</span></span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 68 |
+
"capture": "Table II: Training and Environment Parameters"
|
| 69 |
+
}
|
| 70 |
+
},
|
| 71 |
+
"image_paths": {
|
| 72 |
+
"1": {
|
| 73 |
+
"figure_path": "2408.05884v2_figure_1.png",
|
| 74 |
+
"caption": "Figure 1: System concept.",
|
| 75 |
+
"url": "http://arxiv.org/html/2408.05884v2/x1.png"
|
| 76 |
+
},
|
| 77 |
+
"2": {
|
| 78 |
+
"figure_path": "2408.05884v2_figure_2.png",
|
| 79 |
+
"caption": "Figure 2: NR-U LBT4 Channel access mechanism.",
|
| 80 |
+
"url": "http://arxiv.org/html/2408.05884v2/x2.png"
|
| 81 |
+
},
|
| 82 |
+
"3": {
|
| 83 |
+
"figure_path": "2408.05884v2_figure_3.png",
|
| 84 |
+
"caption": "Figure 3: Distributed training and execution architecture.",
|
| 85 |
+
"url": "http://arxiv.org/html/2408.05884v2/x3.png"
|
| 86 |
+
},
|
| 87 |
+
"4": {
|
| 88 |
+
"figure_path": "2408.05884v2_figure_4.png",
|
| 89 |
+
"caption": "Figure 4: Integration of ns3-ai gym with RLlib using a dummy environment.",
|
| 90 |
+
"url": "http://arxiv.org/html/2408.05884v2/x4.png"
|
| 91 |
+
},
|
| 92 |
+
"5": {
|
| 93 |
+
"figure_path": "2408.05884v2_figure_5.png",
|
| 94 |
+
"caption": "Figure 5: Learning curves comparing the convergence of proposed DTDE against the centralized approach CTCE.",
|
| 95 |
+
"url": "http://arxiv.org/html/2408.05884v2/x5.png"
|
| 96 |
+
}
|
| 97 |
+
},
|
| 98 |
+
"validation": true,
|
| 99 |
+
"references": [],
|
| 100 |
+
"url": "http://arxiv.org/html/2408.05884v2"
|
| 101 |
+
}
|
20240819/2408.07967v2.json
ADDED
|
@@ -0,0 +1,578 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "FlashGS: Efficient 3D Gaussian Splatting for Large-scale and High-resolution Rendering",
|
| 3 |
+
"abstract": "This work introduces FlashGS, an open-source CUDA Python library 111Available at https://github.com/InternLandMark/FlashGS.\ndesigned to facilitate the efficient differentiable rasterization of 3D Gaussian Splatting through algorithmic and kernel-level optimizations. FlashGS is developed based on the observations from a comprehensive analysis of rendering process to enhance computational efficiency and bring the technique to wide adoption.\nThe paper includes a suite of optimization strategies, encompassing redundancy elimination, efficient pipelining, refined control and scheduling mechanisms, and memory access optimizations, all of which are meticulously integrated to amplify the performance of the rasterization process.\nAn extensive evaluation of FlashGS\u2019 performance has been conducted across a diverse spectrum of synthetic and real-world large-scale scenes, encompassing a variety of image resolutions. The empirical findings demonstrate that FlashGS consistently achieves an average 4x acceleration over mobile consumer GPUs, coupled with reduced memory consumption.\nThese results underscore the superior performance and resource optimization capabilities of FlashGS, positioning it as a formidable tool in the domain of 3D rendering.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "1. Introduction",
|
| 9 |
+
"text": "Neural Radiance Fields (NeRF) (Mildenhall et al., 2021 ###reference_b20###) have become popular for producing realistic image renderings, but the process of sampling numerous points per ray for pixel rendering hinders real-time capabilities. Recently, a new representation method known as 3D Gaussian Splatting (3DGS) (Kerbl et al., 2023 ###reference_b14###) has come to the forefront as a viable alternative. This technique has demonstrated the ability to attain real-time rendering speeds. The emergence of 3DGS has opened up a realm of possibilities, allowing for real-time exploration of indoor environments. This has far-reaching implications across various practical applications, including immersive free-viewpoint navigation, virtual property tours, and interactive virtual-reality gaming experiences.\n###figure_1### Despite 3DGS\u2019s advantages, real-time rendering of large-scale or high-resolution areas on city-scale scenes (Li et al., 2023 ###reference_b17###) or high quality scenes recorded by consumer GPS receivers (Crandall et al., 2011 ###reference_b5###) is still hindered by limited computational and memory resources.\nThis is due to the increase in the number of Gaussians and the size of each Gaussian as the image scale and resolution increase.\nExisting works widely employ well-established compression or pruning methods to avoid storing or computing excessive Gaussians (Girish et al., 2023 ###reference_b10###; Lu et al., 2024 ###reference_b19###; Fan et al., 2023 ###reference_b8###).\nHowever, these methods have not yielded significant performance improvements.\nA recent work, GScore (Lee et al., 2024 ###reference_b16###), attempts to analyze the original 3DGS algorithm but primarily addresses limitations on mobile GPUs by designing a novel domain-specific hardware.\nTo address the shortcomings of current works, we\u2019ve analyzed the 3DGS rendering process on a single consumer-grade GPU, identifying rendering performance bottlenecks, such as the rough intersection testing between Gaussians and tiles during preprocessing, and the massive redundant computations in volume rendering.\nWe further conduct a detailed performance profiling and extract several key factors hindering performance:\n1) Some Gaussian-tile pairs generated during preprocessing stage are not used in the actual rendering process.\n2) There are severe compute and memory access bottlenecks at different stages of the entire rasterization algorithm.\nBased on our in-depth observations, we propose FlashGS, a novel 3DGS rendering algorithm with efficient system-level optimizations. Specifically, we first introduce a new design of rasterization workflow, constructing a more efficient rendering pipeline.\nFor the implementation of this new rendering process, we firstly perform geometric and algebraic simplifications to alleviate high computational costs. 
We propose new runtime scheduling strategies to utilize the hardware, including load balancing and divergence elimination between threads, and a two-step prefetching execution pipeline to overlap computation and memory access operations.\nWe also optimize memory management to better leverage the GPU memory hierarchy.\nAdditionally, we further improve the performance by directly applying assembly-level optimizations.\nCompared to 3DGS (Kerbl et al., 2023 ###reference_b14###), GScore (Lee et al., 2024 ###reference_b16###) and gsplat (Ye and Kanazawa, 2023 ###reference_b29###), FlashGS achieves up to 4x speedup and 49% memory reduction, while keeping the same peak signal-to-noise ratio (PSNR), a standard image quality metric.\nTo the best of our knowledge, this is the first work to analyze in depth, carefully re-design, and efficiently implement 3DGS-based volume rendering on a single consumer GPU.\nWe make it possible to achieve efficient rendering for large-scale and high-resolution scenes with low overhead.\nOur main contributions are as follows:\nWe conduct an in-depth study of the original 3DGS algorithm and the current state-of-the-art research. We identify the main challenges in large-scale and high-resolution scenes and pinpoint the fundamental causes of performance bottlenecks.\nWe propose a new algorithm, FlashGS, which includes a precise redundancy elimination algorithm and an efficient rendering execution workflow.\nWe systematically implement FlashGS on GPU with holistic optimizations, including computation, memory management, and scheduling.\nWe test FlashGS on representative datasets, demonstrating that it significantly improves rendering speed while maintaining high image quality and low memory usage."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "2. Background and Related Work",
|
| 15 |
+
"text": "We briefly overview the novel-view synthesis and some existing methods, particularly the radiance field methods. Then, we introduce the current state-of-the-art, 3D Gaussian Splatting (3DGS). Finally, we briefly report some recent improvements in 3DGS."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "2.1. Novel View Synthesis",
|
| 21 |
+
"text": "Novel view synthesis is an important method in 3D reconstruction. It generates new images from new viewpoints (target poses) based on a set of input images of a 3D scene from certain viewpoints (source poses).\nBefore reconstruction, some representations are proposed to construct the 3D scene. Traditional methods use point clouds, voxels, or meshes to describe the spatial structure.\nHowever, these traditional explicit and discrete representations can lead to information loss and thus result in an incomplete representation of the scene.\nNeural Radiance Fields (NeRF) (Mildenhall et al., 2021 ###reference_b20###) is a widely adopted attractive representation method, leveraging deep learning techniques. It implicitly models 3D scenes using multi-layer perceptrons (MLPs), and this continuous function achieves a fine representation of the scene. Some subsequent works focus on improving the quality of synthesized images through optimizing the models. Mip-NeRF (Barron et al., 2021 ###reference_b2###) is a multiscale model that casts cones and encodes the positions and sizes of frustum to address the aliasing issues of NeRF. Some methods (Deng et al., 2022 ###reference_b6###; Wei et al., 2021 ###reference_b24###; Xu et al., 2022 ###reference_b27###) leveraging depth supervision of point clouds to make the model converge faster.\nThe substantial computation required by the MLP in NeRF results in slow training and rendering processes, which has inspired plenty of works on efficient training and inference. A common technique is storing the precompute training results to simplified data structures, such as sparse voxel grid (Hedman et al., 2021 ###reference_b12###) and octrees (Yu et al., 2021 ###reference_b30###; Hu et al., 2022 ###reference_b13###; Wang et al., 2022 ###reference_b23###). Other works propose or integrate new features with MLPs to enable faster training or inference, such as features in a voxel grid (Liu et al., 2020 ###reference_b18###; Wu et al., 2022 ###reference_b25###). A notable work greatly accelerates NeRF, Instant-NGP (M\u00fcller et al., 2022 ###reference_b21###), introduces a multi-resolution hash encoding grid, and simultaneously trains it with optimized MLPs.\n###figure_2###"
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "2.2. 3D Gaussian Splatting",
|
| 27 |
+
"text": "3D Gaussian Splatting (3DGS) (Kerbl et al., 2023 ###reference_b14###) models the scene with a set of Gaussian ellipsoids, allowing fast rendering through rasterizing these Gaussians into high quality images. Figure 2 ###reference_### illustrate the 3DGS mechanism. The rasterizer can be coarsely divided into 3 steps: preprocessing (preprocessCUDA and DuplicateWithKeys), sorting and rendering.\nThe preprocessing step in the original implementation contains two main operations.\nTo begin with, 3D Gaussians after frustum culling are projected into the image plane as 2D Gaussians with mean and anisotropic covariance .\nThen the preprocessing pipeline assigns an ellipse with its axis-aligned bounding box (AABB) to each corresponding 2D Gaussian.\n3DGS uses AABB to get the intersected tiles with the ellipse, and for an AABB assigned by a certain Gaussian, the duplicateWithKeys operation traverses all tiles overlapping the AABB and organizes every tile with the Gaussian as key-value pairs. The keys are composed by the tile indexes and the depths of Gaussians, the values are the Gaussian indexes.\nThe sorting process sorts key-value pairs by depth for the front-to-back rendering, using the RadixSort in Nvidia CUB library.\nThe final rendering precess obtains the pixel color taking the time complexity of (the workload is divided into tiles) and each pixel color is computed considering the opacity and transmittance."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.3",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "2.3. Improvement of 3DGS",
|
| 33 |
+
"text": "Although 3DGS has achieved a significant leap in 3D reconstruction, there remains considerable room for improvement.\nSome work has further improved image quality (photorealistic) by enhancing the sampling process of 3DGS, to eliminate blur and artifacts.\nYan et al. (Yan et al., 2024 ###reference_b28###) analyze the aliasing effect in 3DGS rendering and introduce a multi-scale 3D Gaussians representation to render different levels of detail of the scene, by improving the sampling frequency. Zhang et al. (Zhang et al., 2024 ###reference_b31###) propose FreGS, a novel framework leveraging regularization to perform coarse-to-fine high-quality Gaussian densification, to alleviate the over-construction problem. These aspects are not the focus of this paper, readers can find more details in some surveys (Chen and Wang, 2024 ###reference_b3###; Fei et al., 2024 ###reference_b9###; Wu et al., 2024 ###reference_b26###).\nIn some high-resolution, large-scale scenes, 3DGS can generate millions of or even more (huge) Gaussians, putting immense pressure on memory units. This has inspired some works focusing on optimizing memory usage, which can be categorized into two main types: 1) Leveraging widely used techniques in model compression, like pruning and encoding. Scaffold-GS (Lu et al., 2024 ###reference_b19###) leverages the underlying scene geometry which uses anchor points growing and pruning strategies to effectively reduce redundant Gaussians, without loss of quality when rendering.\nEAGLES (Girish et al., 2023 ###reference_b10###) applies vector quantization (VQ) to quantize per-point attributes to compress the 3D Gaussian point clouds.\nLightGaussian (Fan et al., 2023 ###reference_b8###) further introduces data distillation to reduce the degree of spherical harmonics coefficients and proposes a VQ strategy by the global significance of Gaussians.\n2) Introducing multi-GPU parallelism to avoid the memory constraints of a single GPU.\nGrendel (Zhao et al., 2024 ###reference_b32###) is a scalable (up to 32 GPUs) distributed 3DGS training system, splitting 3DGS parameters across multiple GPUs and using sparse communication and dynamic load balance mechanisms.\nChen et al. (Chen and Lee, 2024 ###reference_b4###) proposed DoGaussian, which decomposes a scene into K blocks and applies Alternating Direction Methods of Multipliers (ADMM) to train 3DGS distributedly.\nHowever, only a handful of papers focus on the design and implementation of the original 3DGS algorithm itself and try to efficiently support critical operators in the computation process.\nThis forms the direct motivation for our work. gsplat (Ye and Kanazawa, 2023 ###reference_b29###) is a CUDA-accelerated differentiable rasterization library for 3DGS, achieving faster and more memory efficient rendering. Durvasula et al. (Durvasula et al., 2023 ###reference_b7###) perform a performance characterization on the rasterization-based differential rendering, including 3DGS, and introduce a novel primitive using warp-level reduction to accelerate costly atomic operations during the gradient computation.\nGScore (Lee et al., 2024 ###reference_b16###) is a specific hardware acceleration unit to efficiently support the rendering pipeline over the mobile GPU along with some algorithmic co-design, based on an analysis of Gaussian-based rendering."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "3. Observation and Motivation",
|
| 39 |
+
"text": "###figure_3### We conduct a detailed analysis of the 3D Gaussian Splatting rendering process. First, we obtain the critical steps in the rendering process by profiling the runtime breakdown.\nBased on it, we identify several significant performance issues, which inspired our optimization design.\nAs shown in the Figure 3 ###reference_###, we test 3D Gaussian Splatting on A100 and V100 GPUs with a representative large-scale dataset, MatrixCity (Ye and Kanazawa, 2023 ###reference_b29###).\nFrom top to bottom it shows the time proportion of key operators in the rendering process.\nThe rendering (renderCUDA) is the primary performance bottleneck, accounting for about 60% of the total time. Gaussian sorting (SortPairs) and the preceding generation of the unsorted key-value list (DuplicateWithKeys) take up nearly 20% of the time. Preprocessing time is within 10%, but it also should not be overlooked. Given that these steps are not independent, we conduct a comprehensive study of them."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.1",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "3.1. Observation 1: Excessive redundant computations hinder efficient rendering.",
|
| 45 |
+
"text": "###figure_4### ###figure_5### We observe that there are several computational redundancies in the rasterizing pipeline. Figure 4 ###reference_### shows that only a small proportion of the key-value pairs contribute to the result of rendering.\nNamely, the preprocessing kernels assigned unnecessary tiles without overlapping the projected ellipse to the Gaussian, creating redundant key values and inefficiencies in the sorting and rendering process.\nThere are three main factors contribute to the redundancies as Figure 5 ###reference_### shows.\nWe find that the redundancy I & II in Figure 5 ###reference_### results in that the assigned AABB covers more tiles of the real AABB as Figure 4 ###reference_### shows. The redundancy III results in the difference between the number of tiles covered by the AABB and the projected ellipse.\n###figure_6### The original implementation adopts the three-sigma rule of thumb that data points with an absolute difference from more than are extremely rare and define the boundary of an ellipse using . Considering the opacity can scale down the size of the ellipse, reducing the area of the AABB and the number of key-value pairs.\nWhen opacity is sufficiently low, the overestimation problem is more significant. As shown in Figure 6 ###reference_###, most Gaussian opacities in the smallcity scene of the MatrixCity dataset are quite low ( 65.1% Gaussians have the center opacity less than 0.35), further proving the importance of considering opacity when defining the ellipse.\nThe preprocessCUDA operation defines the assigned AABB as the bounding box of the circle whose radius is the semi-major axis of the projected ellipse. The AABB is larger than the real AABB of the ellipse, especially for the ellipsoids with high flattening, which introduces computational redundancies for key-values binning, sorting, and rendering.\nAll tiles in AABB are traversed and binned to key-value pairs, even if the tile utterly not covered by the ellipse, which will result in redundant computation in subsequent sorting and rendering process."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.2",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "3.2. Observation 2: Inappropriate execution pipelines reduce hardware utilization.",
|
| 51 |
+
"text": "We find that the main steps in 3DGS exhibit an imbalance between computation and memory access, which can put excessive pressure on the corresponding hardware units, causing computation or memory bottlenecks.\nThe preprocess step (preprocessCUDA in Algorithm 1 ###reference_###) kernel has a linear complexity , where P is the number of Gaussians. And each Gaussian is related to a large array of spherical harmonics coefficients.\nThis results in significant pressure on cache bandwidth due to the massive read-write operations (lines 17-22 of Algorithm 1 ###reference_###).\nDuring the rasterization phase, 3DGS first projects Gaussian spheres on tiles to obtain the tile-gaussian pairs (DuplicateWithKeys).\nIts asymptotic complexity depends on the number of Gaussians and the number of tiles covered by their projection . As shown in Algorithm 2 ###reference_###, in this step, aside from some bitwise operations, all the operations involve reading and writing to global memory.\nDuring rendering, the extensive pixel-level calculations introduce computational bottlenecks, whereas the aforementioned two steps are primarily memory access operations. This results in an imbalance between computation and memory access in the entire 3DGS execution workflow, which can lead to under-utilization of GPU hardware resources."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "4. FlashGS Design",
|
| 57 |
+
"text": "###figure_7### To address the aforementioned issues, we optimize the Gaussian rendering process in two stages. First, we design a more precise intersection algorithm to alleviate subsequent computational bottlenecks related to redundant Gaussian calculations, and use geometric simplifications to avoid expensive intersection computations. Additionally, we reorganized and restructured the computation process based on the characteristics of each kernel/micro kernel to prevent unbalanced workloads. As Figure 7 ###reference_### illustrates, the whole process can greatly benefits from the reduced number of false positive tile kv pairs in FlashGS, as well as from the optimization in algorithms and scheduling for runtime."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.1",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "4.1. Efficient and Precise Intersection Algorithm",
|
| 63 |
+
"text": "###figure_8### As shown in Figure 8 ###reference_###, the precise intersection algorithm can reduce the number of Gaussians involved in subsequent computations, thereby alleviating the computational and memory access intensity/pressure. This is crucial because for the two vital bottlenecks, sorting and rendering, their workload and the number of Gaussians to be processed are proportional.\nWe need to confirm the valid range of the ellipse projected by the Gaussians before performing the precise intersection calculation. As mentioned in Section 3.1 ###reference_###, for 3D image reconstruction, we must consider not only the effect of the general Gaussian distribution on the effective range but also the changes in opacity.\n3D Gaussian Splatting applies the three-sigma rule to get the valid ellipse:\nwhere represents the semi-major axis length of the projected ellipse, which corresponds to the eigenvalue of the covariance matrix, and is the radius of the circle abstracted from the ellipse.\nConsidering the statistics in Section 3.1 ###reference_###, 65% of Gaussian centers have opacity in quite a low range ([0, 0.35], this range will be explained in the following paragraphs), which motivates us to incorporate opacity to obtain the effective range:\nwhere is the initial (central) opacity of the Gaussian. When decays to a certain threshold , we consider this to be the boundary of the ellipse, as described in Equation 3 ###reference_###:\nGiven that RGB typically has 256 discrete values, typically .\nWe can solve that when , our method outperforms the three-sigma rule, and still use 3 conversely.\nWe adopted an exact intersection algorithm to eliminate invalid Gaussians. Consider that a rough intersection algorithm can be highly inefficient for ellipses since they can be quite elongated, which potentially introduces a large amount of unnecessary computations on invalid Gaussians for tiles.\nDirectly computing the intersection between the Gaussian-projected ellipse and the tiles on the screen is expensive. We reduce the overhead of direct intersection computation through a two-stage filtering process: (1) Use a bounding box (note: we calculate a rectangle tightly tangent to the ellipse, rather than a square tangent to the circle enclosing the ellipse as in the original algorithm) is to set a coarse-grained range. (2) For tiles within or intersecting the bounding box, check whether they intersect with the ellipse.\n###figure_9### For each tile intersection, we use geometric equivalent transformations to avoid the cost of directly solving multiple quadratic equations, considering that rectangular tiles are not easily represented by simple equations.\nOur key observation is that the intersection between an ellipse and a rectangle can be transformed into a problem of determining whether the projection line segment of the ellipse onto the line of each edge of the rectangular overlaps with the line segment of the rectangle\u2019s edge, as illustrated in Figure 9 ###reference_###. If they intersect, we find the endpoints of the segment on the line that is enclosed by the ellipse. 
We then use these endpoints to determine whether this segment overlaps with the corresponding edge of the rectangle.\nAs shown in Algorithm 3 ###reference_###, we first classify the intersection between an ellipse and a rectangle into two cases.\nThe first case is straightforward: if the center of the ellipse is inside the rectangular, there must be an overlap.\nFor the more complex case where the ellipse\u2019s center is outside the rectangle, we solve for the intersection of the ellipse with each line containing the edge of the rectangle."
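To make the two-case test concrete, the following is a minimal sketch (not the actual FlashGS kernel) of the tile test, assuming the projected Gaussian is the region a*dx^2 + 2*b*dx*dy + c*dy^2 <= rho around its center; for clarity this version still solves the per-edge quadratics with sqrt, which Section 5.1.1 then removes algebraically:

```cuda
#include <cmath>

struct Tile { float x0, y0, x1, y1; };   // axis-aligned tile bounds

// Does the ellipse's slice along the horizontal line y = yl overlap [x0, x1]?
__host__ __device__ bool slice_overlaps(float a, float b, float c, float rho,
                                        float cx, float cy, float yl,
                                        float x0, float x1)
{
    float dy = yl - cy;
    // a*dx^2 + 2*b*dy*dx + (c*dy^2 - rho) <= 0, a quadratic in dx
    float B = 2.f * b * dy, C = c * dy * dy - rho;
    float disc = B * B - 4.f * a * C;
    if (disc < 0.f) return false;                  // line misses the ellipse
    float s = sqrtf(disc);
    float lo = cx + (-B - s) / (2.f * a);          // chord cut by the ellipse
    float hi = cx + (-B + s) / (2.f * a);
    return lo <= x1 && x0 <= hi;                   // interval overlap test
}

__host__ __device__ bool ellipse_overlaps_tile(float a, float b, float c,
                                               float rho, float cx, float cy,
                                               Tile t)
{
    // Case 1: center inside the tile -> guaranteed overlap.
    if (cx >= t.x0 && cx <= t.x1 && cy >= t.y0 && cy <= t.y1) return true;
    // Case 2: test the ellipse against the lines of the four edges.
    if (slice_overlaps(a, b, c, rho, cx, cy, t.y0, t.x0, t.x1)) return true;
    if (slice_overlaps(a, b, c, rho, cx, cy, t.y1, t.x0, t.x1)) return true;
    // Vertical edges: swap the roles of x and y (the form is symmetric).
    if (slice_overlaps(c, b, a, rho, cy, cx, t.x0, t.y0, t.y1)) return true;
    if (slice_overlaps(c, b, a, rho, cy, cx, t.x1, t.y0, t.y1)) return true;
    return false;
}
```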
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.2",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "4.2. Refined and Balanced Workflow",
|
| 69 |
+
"text": "The workflow of 3DGS rendering is a complex process involving multiple steps as mentioned above. Moreover, different sub-components may have varying computational patterns, and these sub-computations are not entirely independent of each other. Therefore, we need to perform a systematic analysis and optimization from a more holistic perspective.\nOur key idea is to balance the computational cost of each part as much as possible to avoid potential bottlenecks.\nOur approach behind this idea is to amortize different types of operations over the timeline to prevent a specific transaction from bursting and putting excessive pressure.\nWe extract the computational bottlenecks and memory access bottlenecks from different operators and interweave them to form a more efficient execution workflow.\n###figure_10### We observe that before performing rendering, there are several important operations, including the previously mentioned precise intersection, calculating and recording some Gaussian-related information, and constructing key-value pairs for sorting.\nIn baseline, both these two functions are bandwidth-limited and involve extensive memory read and write operations.\nHowever, we observed that the subsequent rendering process is computation-bound. This is due to the unrefined method for Gaussian intersection in the baseline algorithm, which led to massive false positives. Therefore, we may be able to afford more computation during the preprocessing stage. This not only helps to balance the bandwidth-limited parts but also reduces the overhead for subsequent per-pixel rendering. This is one of the key motivations for adopting a precise intersection algorithm mentioned before.\nIn the preprocessing stage, we checked the visibility of the Gaussian and then computed and stored various pieces of information needed for subsequent steps, such as depth, Gaussian radius, and coordinates. In the original algorithm, each thread processing a Gaussian that intersects with a tile accesses global memory to read the sphere coefficients and write several Gaussian information at the end of the preprocessing. This results in significant bandwidth contention, causing the workflow to be stalled during global memory access due to implicit synchronization between the separately launched kernels. We combined our new exact intersection algorithm with DuplicateWithKeys() to precisely generate the valid Gaussian key-value pairs.\nAs previously mentioned, accurately computing the intersection between a tile and a projected ellipse is complex and requires solving multiple equations. Consequently, many operations during this process are compute-bound, leaving surplus bandwidth resources available. Therefore, we reorganized these two operators. By interleaving our exact intersection computations within the delays caused by extensive global memory writes, we ensure that both the GPU\u2019s compute units and memory bandwidth are efficiently utilized throughout the execution workflow. Our approach achieves a high compute-to-memory ratio. As the result shown in Figure 10 ###reference_###, the precise intersection operation reduces redundant Gaussians, alleviating the significant memory access overhead during the sorting stage and the additional computations during the rendering stage. In our new preprocessing stage, we can reuse some intermediate results by kernel fusion, transitioning directly from intersection to key-value pair generation, thereby reducing the number of memory read and write operations. 
Although the total number of computations and memory access operations of the precise intersection and the merged operators may increase, we expose more scheduling opportunities to the compiler and hardware to achieve better balance and amortize over a longer timeline."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "5",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "5. FlashGS Implementation",
|
| 75 |
+
"text": "In this section, we efficiently implement the proposed FlashGS algorithm in Section 4 ###reference_### to fully leverage algorithm benefits while avoiding the additional overhead of complex computations.\nIn our implementation, we further enhance the hardware utilization at runtime by applying\noptimizations on computation, scheduling, and memory management."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "5.1",
|
| 79 |
+
"parent_section_id": "5",
|
| 80 |
+
"section_name": "5.1. Preprocess",
|
| 81 |
+
"text": "In the new preprocessing stage as shown in Algorithm 4 ###reference_###,\nOur precise per-tile intersection algorithm with the ellipse introduces massive complex calculations, which might significantly hinder applying this algorithm in current SOTA works.\nIt is essential to implement this algorithm efficiently, despite our efforts in Section 4.2 ###reference_### to balance the compute-to-memory ratio by binding the precise intersection with some memory access operations.\nWe optimized the algorithm in two key aspects: 1) applying algebraic simplifications to reduce the overhead of a single tile-ellipse intersection; 2) proposing an adaptive scheduling strategy to balance the intersection tasks of the entire projected ellipse of a Gaussian."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "5.1.1",
|
| 85 |
+
"parent_section_id": "5.1",
|
| 86 |
+
"section_name": "5.1.1. Algebraic Simplification",
|
| 87 |
+
"text": "We use algebraic equivalence transformations to avoid high-cost operations (instructions).\nIn our geometric transformations of intersection in Section 4.1 ###reference_###, to determine if two line segments on the same line overlap, we can compare their endpoint coordinates. Assume the two line segments are [ai, bi] and [aj, bj], they overlap if their intersection is not empty as shown in Equation 4 ###reference_###:\nSpecifically, this is equivalent to checking Equation 5 ###reference_### that if the right endpoint (end) of one line segment is after the left endpoint (start) of the other line segment, and if the left endpoint (start) of the line segment is before the right endpoint (end) of the other line segment:\nThe process of finding the endpoints of intersecting line segments requires solving quadratic equations, which involves high-cost operations such as division and square roots. Assume an edge of a tile is represented by the line segment [ai, bi], The endpoints of the intersecting line [aj, bj] (if exists) are two roots of the quadratic equation formed by the line (on which the tile edge lies) and the ellipse. As shown in the Equation 6 ###reference_###, where , and are the coefficients of the quadratic equation :\nWithout loss of generality, let , then this condition can be transformed into Equation 7 ###reference_### to avoid the high-cost operations, division, and square roots, where the two expressions and :"
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "5.1.2",
|
| 91 |
+
"parent_section_id": "5.1",
|
| 92 |
+
"section_name": "5.1.2. Adaptive Size-aware Scheduling",
|
| 93 |
+
"text": "The precise per-tile intersection algorithm also implies that the workload for intersection tasks can vary significantly for different Gaussians, as the number of tiles covered by ellipses of different sizes can vary greatly.\nIn the original 3DGS intersection algorithm, which is based on bounding boxes, this issue does not arise because the covered tile region can be located directly using only the vertices of the bounding box, ignoring the bounding box size.\nAs shown in the figure 11 ###reference_###, for Gaussians of different sizes, the resulting bounding boxes, i.e., the number of tiles to be intersected, can vary greatly.\n###figure_11### We automatically select our processing method based on the size of the Gaussians to achieve efficient scheduling.\nTo ensure that the workload of each thread is balanced, we use an adaptive mapping. When the ellipse is quite small and only requires calculation for one tile, we continue using the current thread for the precise intersection of this Gaussian.\nWhen the ellipse is large, we remap the workload to a thread group (in our implementation, we chose a warp to balance synchronization overhead, considering that in practice, the size of the ellipse seldom covers more than 32 tiles). Thus, we achieve an efficient calculation for the entire Gaussian projected ellipse which can intersect with different numbers of tiles.\nAfter applying all the aforementioned optimizations, our new processing kernel is shown in the Algorithm 4 ###reference_###."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "5.2",
|
| 97 |
+
"parent_section_id": "5",
|
| 98 |
+
"section_name": "5.2. Rendering",
|
| 99 |
+
"text": "To address the issue of low hardware utilization during the 3DGS execution, we implement a two-level pipeline optimization from top to bottom. We adjusted the application\u2019s execution workflow and the instruction dispatch."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "5.2.1",
|
| 103 |
+
"parent_section_id": "5.2",
|
| 104 |
+
"section_name": "5.2.1. Low-latency Execution Pipelining",
|
| 105 |
+
"text": "###figure_12### During rendering, each tile performs computations related to its associated Gaussians, such as calculating transmittance and color. These computations may terminate early based on the accumulated saturation. These calculations depend on the preprocessed and sorted Gaussian lists, which need to be loaded from global memory in advance.\nThe process of reading Gaussian information can be divided into two steps: (1) obtaining the Gaussian index based on the current tile information; (2) using the index to fetch the stored specific information, such as the pixel coordinates of the Gaussian center, the quadratic form of the ellipse, and opacity, among other details.\nThe high latency of global memory access can hinder the efficient execution of the computation pipeline, especially since there are two dependent steps of global memory access as mentioned above.\nOur core strategy is to issue as many instructions as possible during memory access latency periods to alleviate stalls caused by data dependencies.\nSpecifically, we introduced a two-level prefetching strategy to maximize the overlap between computation and memory access illustrated in Figure 12 ###reference_###. Using a software pipelining approach, for the computation at step i, we first fetch the Gaussian index for step i+2 from global memory. Then, using the already fetched index, we retrieve the specific Gaussian information for step i+1 from global memory. Finally, we perform the rendering computation for step i using the Gaussian information fetched at step i-1. We overlap independent instructions as much as possible in our fine-grained rearrangement, thereby achieving a more efficient instruction dispatch pipeline."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "5.2.2",
|
| 109 |
+
"parent_section_id": "5.2",
|
| 110 |
+
"section_name": "5.2.2. Warp Divergence Control",
|
| 111 |
+
"text": "Thread divergence within a warp can cause some threads to stall, affecting execution efficiency. The core idea is to minimize conditional branching or move conditional checks to an outer layer (coarser granularity), thus ensuring that threads within a warp execute the same path as much as possible.\nIn the render step of the original 3DGS algorithm, each thread must check opacity and transmittance when computing a pixel to perform early stopping and determine whether the pixel is valid before writeback. We move the opacity check to the preprocessing stage for effective Gaussian computations. This preemptive filtering at the coarser Gaussian level eliminates redundancy, replacing the per-pixel conditional checks during rendering."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "5.2.3",
|
| 115 |
+
"parent_section_id": "5.2",
|
| 116 |
+
"section_name": "5.2.3. Coarse-grained Workload Partition",
|
| 117 |
+
"text": "We facilitate common data sharing by adjusting the task dispatch granularity so that a single thread handles multiple groups of tasks.\nThis larger granularity is mainly based on two considerations. Firstly, it aims to fully utilize fast memory units (such as registers), thus we cannot set the buffer too small (e.g., only buffering a very small amount of data for a single item). Secondly, it takes into account the issue of shared data. In rendering, there is also a certain amount of common computation. We provide a group of tasks as a unit to the compiler to expose more potential sharing opportunities.\nSpecifically, we expose the opportunities for common subexpression elimination (CSE) to further improve computational performance by reducing FLOPs."
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "5.3",
|
| 121 |
+
"parent_section_id": "5",
|
| 122 |
+
"section_name": "5.3. Common Optimizations",
|
| 123 |
+
"text": "In addition to the aforementioned efficient parallel algorithm implementation of some critical kernels, we also applied several general techniques to optimize memory and instruction usage throughout the FlashGS computation, achieving further performance benefits."
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "5.3.1",
|
| 127 |
+
"parent_section_id": "5.3",
|
| 128 |
+
"section_name": "5.3.1. Memory Management",
|
| 129 |
+
"text": "3DGS involves a large number of memory operations, including read, write, and allocation.\nWe need a meticulous design to exploit the GPU\u2019s hierarchical memory and the memory bandwidth.\nWe utilize the constant memory to reduce latency.\nWe carefully analyze the algorithm and extract the special parameters to efficiently utilize the GPU memory hierarchy. The core idea is to leverage special memory access patterns to maximize the use of data loaded into a faster memory hierarchy.\nWe use constant memory when available, which is highly beneficial for accessing data shared across all threads. For example, in the preprocessing step, all threads need to read some predefined input information, such as projection matrices and transformation matrices. By leveraging the large parameter passing feature (CUDA 12.1), we pass these data directly as parameters to the kernel rather than pointers to their memory locations. This ensures these parameters are placed in constant memory instead of global memory.\nGiven that global memory latency is approximately 500 cycles, while constant memory access latency with caching is only around 5 cycles, our data placement strategy not only improves access speed but also reduces bandwidth pressure on the L1 cache caused by frequent global memory accesses.\nWe reduce the dynamic memory allocation.\nWe avoid the performance reduction of frequent memory allocation, which could be caused by the overhead of system calls and memory fragmentation. We extract the dynamic memory allocation operations and related preprocessing operations to the initial stage. For example, a unified preprocessing can be done in advance for computations involving different viewpoints of the same scenario."
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"section_id": "5.3.2",
|
| 133 |
+
"parent_section_id": "5.3",
|
| 134 |
+
"section_name": "5.3.2. Assembly Optimization",
|
| 135 |
+
"text": "We also utilize explicit and implicit optimizations to better leverage efficient instructions supported by the GPUs.\nIn Gaussian computations, due to the exponential part of the Gaussian distribution, naive calls can result in high overhead. By converting the direct multiplication in the exponential expression to logarithmic form, we first perform addition and then exponential operations. This approach is beneficial because it allows the compiler to use fused multiply-add (FMA) instructions, which have the same overhead as a single MUL or ADD instruction.\nAdditionally, when performing logarithmic and exponential operations, we explicitly specify base 2 because the GPU\u2019s Special Function Unit (SFU) hardware instructions are optimized for base 2, allowing for direct and efficient calls of the fast ex2.approx.ftz.f32 PTX instruction."
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"section_id": "6",
|
| 139 |
+
"parent_section_id": null,
|
| 140 |
+
"section_name": "6. Evaluation",
|
| 141 |
+
"text": ""
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"section_id": "6.1",
|
| 145 |
+
"parent_section_id": "6",
|
| 146 |
+
"section_name": "6.1. Experimental Setup",
|
| 147 |
+
"text": ""
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"section_id": "6.1.1",
|
| 151 |
+
"parent_section_id": "6.1",
|
| 152 |
+
"section_name": "6.1.1. Testbed",
|
| 153 |
+
"text": "We conduct our experiments on two NVIDIA GPU platforms. Most tests are performed on a consumer-grade RTX 3090 (Ampere architecture, 24GB GDDR6X memory, CUDA Compute Capability 8.6). We use an A100 (Ampere architecture, 80GB HBM2e, CUDA Compute Capability 8.0) data-center GPU to study performance sensitivity.\nWe compile our codes with GCC 10.5.0 and NVCC in CUDA 12.0. We have also integrated FlashGS as a callable module in Python to enhance usability."
|
| 154 |
+
},
|
| 155 |
+
{
|
| 156 |
+
"section_id": "6.1.2",
|
| 157 |
+
"parent_section_id": "6.1",
|
| 158 |
+
"section_name": "6.1.2. Dataset",
|
| 159 |
+
"text": "We comprehensively test FlashGS and 3DGS on 6 representative datasets, which cover very different resolutions on a total of 11 scenes with hundreds or thousands of frames per scene. The Truck and Train are outdoor scenes from the Tanks & Temple (Knapitsch et al., 2017 ###reference_b15###). The Playroom and DrJohnson are two indoor scenes in DeepBlending (Hedman et al., 2018 ###reference_b11###). 3DGS claims that they achieve real-time rendering with 1080p resolution on these datasets. We use another 2 large-scale and high-resolution datasets which beyond 3DGS\u2019s capabilities to render in real-time.\nMatrixCity (Li et al., 2023 ###reference_b17###) is a comprehensive and large-scale high-quality dataset, comprising thousands of aerial images at a resolution of 1080p. Rubble in Mill19 (Turki et al., 2022 ###reference_b22###) contains high-resolution images captured by drones. We train 30K iterations on each dataset with 3DGS to obtain the Gaussian model for rendering."
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"section_id": "6.1.3",
|
| 163 |
+
"parent_section_id": "6.1",
|
| 164 |
+
"section_name": "6.1.3. Baseline",
|
| 165 |
+
"text": "We compare FlashGS with 3DGS (Kerbl et al., 2023 ###reference_b14###) and the recent optimizations on GScore (Lee et al., 2024 ###reference_b16###). Since GScore is implemented as a hardware accelerator unit, we cannot directly test it. They claim that GScore achieves 1.86 speedup with their intersection and scheduling techniques 222Their shape-aware intersection test provides 1.71 speedup and subtle skipping offers an additional 15% improvement.."
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"section_id": "6.2",
|
| 169 |
+
"parent_section_id": "6",
|
| 170 |
+
"section_name": "6.2. Overall Performance",
|
| 171 |
+
"text": "As shown in Table 1 ###reference_###, the rasterization pipe of FlashGS outperforms the original 3DGS algorithm across all the performance metrics on every dataset with different scenes and resolutions.\nWe can always achieve \u00bf 100FPS rendering on RTX 3090, even for high-resolution and large-scale datasets. In the slowest frame in Rubble of all datasets, we achieve 107.3 FPS. This demonstrates that FlashGS can perform real-time rendering even in extremely large and high-resolution cases.\nFlash GS achieves up to 30.53 speedup with an average of 12.18 on the Matrixcity dataset at 4k resolution. We achieve 7.2 average speedup on all 11 scenes, while achieving 8.6 speedup on the 7 large-scale or high-resolution tests, which is 3.87 of the GScores results (Lee et al., 2024 ###reference_b16###). FlashGS achieves a 2.29 to 30.53 speedup for all the frames in different scenes, comprehensively proving the generality of our design and optimization."
|
| 172 |
+
},
|
| 173 |
+
{
|
| 174 |
+
"section_id": "6.3",
|
| 175 |
+
"parent_section_id": "6",
|
| 176 |
+
"section_name": "6.3. Runtime Breakdown Analysis",
|
| 177 |
+
"text": "###figure_13### In this section, we compare and analyze the runtime breakdown of FlashGS to 3DGS, aiming to reveal the source of our excellent performance.\nFigure 13 ###reference_### shows the rasterization time and the breakdown for 6 representative frames shows max or min speedup. This demonstrates that we have accelerated all stages, including preprocess, sort, and render.\nIn FlashGS, these stages respectively account for average 19.6%, 29.1%, and 47.6% of the total time, whereas in 3DGS, they account for 13.2%, 25.4%, and 59.6%. The optimizations in the preprocess and render stages have already been discussed earlier, while the speedup in the sorting stage is primarily due to the reduction in the number of key-value pairs to be sorted after applying our precise intersection algorithm. We will give a more detailed analysis below."
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"section_id": "6.3.1",
|
| 181 |
+
"parent_section_id": "6.3",
|
| 182 |
+
"section_name": "6.3.1. Profiling Results",
|
| 183 |
+
"text": "###figure_14### We further demonstrate our optimizations in the rendering and preprocessing stages through profiling results of memory and compute units, as shown in the figures. Figure 14 ###reference_### shows that we reduce the issued instructions in the rendering stage, alleviating the computation-bound problem. This prove the effectiveness of our precise intersection algorithm and the optimizations for low-latency rendering. The total issues instructions is significantly reduced by one to two orders of magnitude. For memory access trasactions in preprocessing, we also reduced the number of global memory accesses by 43%-87% compared to 3DGS.\n###figure_15### Figure 15 ###reference_### further shows the reason for the significant instruction reduction in the rendering stage, which is dominant in the rasterization. In tile-based rendering, the total number of issued instructions is the product of the number of tiles and the instructions per tile. Therefore, this reduction mainly stems from two aspects. In Figure 15 ###reference_###, the number of rendered key-value pairs benefit from our intersection optimization, reducing by 68%-96%. The instructions involved in each tile\u2019s computation also decrease by 67%-71%, as we further optimize the renderCUDA kernel at the instruction level. The reduction in the number of generated key-value pairs also benefits the sorting process, as it significantly decreases the size of the list to be sorted. Additionally, less memory is required to store these key-value pairs."
|
| 184 |
+
},
|
| 185 |
+
{
|
| 186 |
+
"section_id": "6.4",
|
| 187 |
+
"parent_section_id": "6",
|
| 188 |
+
"section_name": "6.4. Sensitivity Study",
|
| 189 |
+
"text": "In this section, we study the FlashGS performance sensitivity on different GPUs and sciences."
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"section_id": "6.4.1",
|
| 193 |
+
"parent_section_id": "6.4",
|
| 194 |
+
"section_name": "6.4.1. Performance on A100",
|
| 195 |
+
"text": "We also conduct the experiments presented in Table 1 ###reference_### on the A100 GPU, and the results are shown in Table 1 ###reference_###. The total rasterizerization time is slower on the A100 compared to the 3090, with on average 1.43 slower. This is primarily due to the rendering step is dominant, as shown in Figure 13 ###reference_###, which heavily relies on FP32 computation, where the FP32 peak performance of the A100 is only 19.5TFLOPS while the 3090 is 35.6TFLOPS. Although the operations before rendering, such as sorting, are mainly memory-bandwidth-bound, and the A100 has a higher bandwidth than 3090, these benefits can not outweigh the render performance reduction. However, we still achieve significant speedups to 3DGS in all datasets on the A100 with an average of 123.8-423.7 FPS across 11 scenes in Table 1 ###reference_###, always reaching real-time rendering."
|
| 196 |
+
},
|
| 197 |
+
{
|
| 198 |
+
"section_id": "6.4.2",
|
| 199 |
+
"parent_section_id": "6.4",
|
| 200 |
+
"section_name": "6.4.2. Performance on different scenes",
|
| 201 |
+
"text": "In Table 1 ###reference_###, we observe two main results: 1) For the same dataset, the speedup at 4K resolution is higher than at 1080p. 2) Across different datasets, the highest speedup is achieved in large-scale, high-resolution scenes (MatrixCity-4K). This is primarily source from our precise intersection calculations, where the computational load in the rendering is proportional to the number of Gaussians and the number of tiles each Gaussian intersects. In large scenes with more objects, the number of Gaussians increases; for high resolutions, the Gaussians become larger to render finer details.\nIn such cases, the redundancy elimination effect is more pronounced. And if in a very small and simple scene, each Gaussian might be so small that it intersects only one tile, leading to a lower improvement of our precise intersection."
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"section_id": "6.5",
|
| 205 |
+
"parent_section_id": "6",
|
| 206 |
+
"section_name": "6.5. Image Quality",
|
| 207 |
+
"text": "###figure_16### Figure 17 ###reference_### shows the most complex frame of the origin large-scale MatrixCity dataset at 1080p, to compare the output quality. We compare the Peak Signal to Noise Ratio (PSNR) between FlashGS and 3DGS, which is a standard metric in computer graphics (higher is better). The result shows that FlashGS does not change the quality, keeping 31.52 PSNR with 8.57 speedup.\nIt is reasonable that our precise intersection algorithms only reduce the false-positive redundancies. And we do not apply pruning or quantization strategies in our implementation so there is no accuracy loss.\nIn Table 2 ###reference_###, we observe two main results: 1) For the same dataset, the speedup at 4K resolution is higher than at 1080p. 2) Across different datasets, the highest speedup is achieved in large-scale, high-resolution scenes (MatrixCity-4K). This is primarily source from our precise intersection calculations, where the computational load in the rendering is proportional to the number of Gaussians and the number of tiles each Gaussian intersects. In large scenes with more objects, the number of Gaussians increases; for high resolutions, the Gaussians become larger to render finer details.\nIn such cases, the redundancy elimination effect is more pronounced. And if in a very small and simple scene, each Gaussian might be so small that it intersects only one tile, leading to a lower improvement of our precise intersection."
|
| 208 |
+
},
|
| 209 |
+
{
|
| 210 |
+
"section_id": "6.6",
|
| 211 |
+
"parent_section_id": "6",
|
| 212 |
+
"section_name": "6.6. Image Quality",
|
| 213 |
+
"text": "###figure_17### Figure 17 ###reference_### shows two representative frames of the large-scale MatrixCity dataset. We compare the PSNR between FlashGS and 3DGS, and the result shows that FlashGS does not change the quality. It is reasonable that our precise intersection algorithms only reduce the false-positive redundancies. And we do not apply pruning or quantization strategies in our implementation so there is no accuracy loss."
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"section_id": "6.7",
|
| 217 |
+
"parent_section_id": "6",
|
| 218 |
+
"section_name": "6.7. Memory Comparison",
|
| 219 |
+
"text": "FlashGS allocates less memory than 3DGS and gsplat, up to 49.2% reduction.\nTable 2 ###reference_### compares the memory usage before and after rendering the 800th frame on an NVIDIA A100 GPU for different models, including gsplat (with set to and ), the original 3DGS, and FlashGS.\nSpecifically, the gsplat with set to has a maximum memory allocation of 10.75 GB after rendering, while with set to , the maximum memory allocation is reduced to 9.83 GB, significantly decreasing memory usage. This reduction occurs because, when is set to , the rasterization process is more memory-efficient, packing intermediate tensors into sparse tensor layouts. This is particularly beneficial in large scenes where each camera only sees a small portion of the scene, greatly reducing memory usage. However, this also introduces some runtime overhead.\nIn contrast, the original GS uses the most memory, with a memory allocation of 13.45 GB after rendering\n, making it an important factor to consider when dealing with complex scenes.\nFlashGS also ensures consistency and predictability in memory usage through a static allocation method, with a maximum memory allocation of 6.83 GB, which is lower than the other models. This demonstrates its superior efficiency and performance when handling large scenes. The static allocation method effectively avoids fluctuations in memory allocation and release processes, resulting in more stable memory management.\nThe number of kv pairs generated by each method is a crucial factor in determining memory usage when rendering. The original 3DGS model generates 56,148,670 key-value pairs, which leads to its higher memory consumption. In comparison, gsplat with set to and generates 56,996,302 and 56,998,101 key-value pairs respectively, showing a slight increase. And it proves that the memory reduction come from its compression techniques.\nFlashGS, on the other hand, generates only 3,436,142 key-value pairs, drastically reducing the memory usage."
|
| 220 |
+
},
|
| 221 |
+
{
|
| 222 |
+
"section_id": "7",
|
| 223 |
+
"parent_section_id": null,
|
| 224 |
+
"section_name": "7. Conclusions",
|
| 225 |
+
"text": "We propose FlashGS, enabling real-time rendering of large-scale and high-resolution scenes.\nIn this paper, we achieved a fast rendering pipeline through a refined algorithm design and several highly optimized implementations, addressing the redundancy and improper compute-to-memory ratio issues present in original 3DGS.\nFlashGS significantly surpasses the rendering performance of existing methods on GPUs, achieves efficient memory management, while maintaining high image quality."
|
| 226 |
+
}
|
| 227 |
+
],
|
| 228 |
+
"appendix": [],
|
| 229 |
+
"tables": {
|
| 230 |
+
"1": {
|
| 231 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T1\">\n<div class=\"ltx_inline-block ltx_transformed_outer\" id=\"S6.T1.1\" style=\"width:593.0pt;height:97.9pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-139.5pt,23.0pt) scale(0.68,0.68) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S6.T1.1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S6.T1.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" id=\"S6.T1.1.1.1.1\" rowspan=\"2\"><span class=\"ltx_text ltx_nopad\" id=\"S6.T1.1.1.1.1.1\"><svg height=\"18.92\" overflow=\"visible\" version=\"1.1\" width=\"93.56\"><g transform=\"translate(0,18.92) scale(1,-1)\"><path d=\"M 0,18.92 93.56,0\" stroke=\"#000000\" stroke-width=\"0.4\"></path><g class=\"ltx_svg_fog\" transform=\"translate(0,0)\"><g transform=\"translate(0,9.46) scale(1, -1)\"><foreignobject height=\"9.46\" overflow=\"visible\" width=\"45.09\">\n<span class=\"ltx_inline-block\" id=\"S6.T1.1.1.1.1.1.pic1.1.1\">\n<span class=\"ltx_inline-block ltx_align_left\" id=\"S6.T1.1.1.1.1.1.pic1.1.1.1\">\n<span class=\"ltx_p\" id=\"S6.T1.1.1.1.1.1.pic1.1.1.1.1\">Metrics</span>\n</span>\n</span></foreignobject></g></g><g class=\"ltx_svg_fog\" transform=\"translate(46.78,9.46)\"><g transform=\"translate(0,9.46) scale(1, -1)\"><foreignobject height=\"9.46\" overflow=\"visible\" width=\"46.78\">\n<span class=\"ltx_inline-block\" id=\"S6.T1.1.1.1.1.1.pic1.2.1\">\n<span class=\"ltx_inline-block ltx_align_right\" id=\"S6.T1.1.1.1.1.1.pic1.2.1.1\">\n<span class=\"ltx_p\" id=\"S6.T1.1.1.1.1.1.pic1.2.1.1.1\">Dataset</span>\n</span>\n</span></foreignobject></g></g></g></svg></span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" colspan=\"2\" id=\"S6.T1.1.1.1.2\">Truck</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" colspan=\"2\" id=\"S6.T1.1.1.1.3\">Train</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" colspan=\"2\" id=\"S6.T1.1.1.1.4\">Playroom</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" colspan=\"2\" id=\"S6.T1.1.1.1.5\">DrJohnson</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" colspan=\"2\" id=\"S6.T1.1.1.1.6\">Matrixcity</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" id=\"S6.T1.1.1.1.7\">Rubble</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.1.2.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S6.T1.1.1.2.1.1\">1080p</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" id=\"S6.T1.1.1.2.1.2\">4k</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S6.T1.1.1.2.1.3\">1080p</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" id=\"S6.T1.1.1.2.1.4\">4k</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S6.T1.1.1.2.1.5\">1080p</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" id=\"S6.T1.1.1.2.1.6\">4k</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S6.T1.1.1.2.1.7\">1080p</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" id=\"S6.T1.1.1.2.1.8\">4k</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S6.T1.1.1.2.1.9\">1080p</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" id=\"S6.T1.1.1.2.1.10\">4k</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" 
id=\"S6.T1.1.1.2.1.11\">4608*3456</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T1.1.1.3.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.3.1.1\">AvgTime (FlashGS / 3DGS)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.3.1.2\">2.22 / 8.21</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.3.1.3\">3.46 / 24.19</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.3.1.4\">1.93 / 7.82</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.3.1.5\">3.32 / 12.82</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.3.1.6\">1.44 / 6.83</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.3.1.7\">2.72 / 10.74</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.3.1.8\">1.63 / 9.11</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.3.1.9\">2.99 / 28.74</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.3.1.10\">3.22 / 20.90</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.3.1.11\">4.90 / 66.55</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.3.1.12\">6.19\n/ 44.11</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.1.4.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.4.2.1\">MaxTime (FlashGS / 3DGS)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.4.2.2\">4.44 / 10.18</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.4.2.3\">4.65 / 29.87</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.4.2.4\">3.48 / 12.82</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.4.2.5\">4.22 / 42.73</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.4.2.6\">2.92 / 10.74</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.4.2.7\">4.69 / 32.11</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.4.2.8\">4.95 / 16.92</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.4.2.9\">6.91 / 57.67</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.4.2.10\">4.77 / 40.74</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.4.2.11\">7.25 / 140.52</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.4.2.12\">9.32 / 67.57</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.1.5.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.5.3.1\">AvgSpeedup</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.5.3.2\">3.76</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.5.3.3\">7.01</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.5.3.4\">4.20</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.5.3.5\">7.48</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.5.3.6\">4.92</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.5.3.7\">7.86</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.5.3.8\">6.18</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.5.3.9\">9.99</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.5.3.10\">6.56</td>\n<td class=\"ltx_td ltx_align_center 
ltx_border_r\" id=\"S6.T1.1.1.5.3.11\">13.64</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.5.3.12\">7.41</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.1.6.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.6.4.1\">MaxSpeedup</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.6.4.2\">4.74</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.6.4.3\">8.60</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.6.4.4\">6.89</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.6.4.5\">11.51</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.6.4.6\">7.91</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.6.4.7\">11.16</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.6.4.8\">14.99</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.6.4.9\">21.66</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.6.4.10\">13.49</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.6.4.11\">30.53</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.6.4.12\">14.10</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.1.7.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.7.5.1\">MinSpeedup</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.7.5.2\">2.29</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.7.5.3\">5.53</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.7.5.4\">2.19</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.7.5.5\">4.46</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.7.5.6\">3.16</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.7.5.7\">5.69</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.7.5.8\">3.07</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.7.5.9\">6.39</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.7.5.10\">3.92</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.7.5.11\">7.30</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.7.5.12\">5.23</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.1.8.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.8.6.1\">AvgTime (FlashGS on A100)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.8.6.2\">3.37</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.8.6.3\">4.87</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.8.6.4\">3.05</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.8.6.5\">4.58</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.8.6.6\">2.36</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.8.6.7\">3.98</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.8.6.8\">2.59</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.8.6.9\">4.35</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.8.6.10\">4.18</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.8.6.11\">6.55</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T1.1.1.8.6.12\">8.08</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" 
id=\"S6.T1.3.1.1\" style=\"font-size:90%;\">Table 1</span>. </span><span class=\"ltx_text\" id=\"S6.T1.4.2\" style=\"font-size:90%;\">FlashGS average/slowest frame rendering time (ms) and corresponding FPS with speedup relative to 3DGS across different datasets and resolutions on 3090 GPU.\n</span></figcaption>\n</figure>",
|
| 232 |
+
"capture": "Table 1. FlashGS average/slowest frame rendering time (ms) and corresponding FPS with speedup relative to 3DGS across different datasets and resolutions on 3090 GPU.\n"
|
| 233 |
+
},
|
| 234 |
+
"2": {
|
| 235 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T2\">\n<div class=\"ltx_inline-block ltx_transformed_outer\" id=\"S6.T2.4\" style=\"width:272.1pt;height:66.6pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-47.8pt,11.7pt) scale(0.74,0.74) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S6.T2.4.4\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S6.T2.4.4.5.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_r\" id=\"S6.T2.4.4.5.1.1\">Method</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" id=\"S6.T2.4.4.5.1.2\">Before Rendering</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r\" id=\"S6.T2.4.4.5.1.3\">After Rendering</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S6.T2.4.4.5.1.4\">#key-value pairs</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T2.4.4.6.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"S6.T2.4.4.6.1.1\">3DGS</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.4.4.6.1.2\">7.86 GB</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.4.4.6.1.3\">13.45 GB</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T2.4.4.6.1.4\">56,148,670</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T2.2.2.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S6.T2.2.2.2.2\">gsplat ( )</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T2.2.2.2.3\">4.52 GB</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T2.2.2.2.4\">10.75 GB</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T2.2.2.2.5\">56,996,302</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T2.4.4.4\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S6.T2.4.4.4.2\">gsplat ( )</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T2.4.4.4.3\">4.52 GB</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T2.4.4.4.4\">9.83 GB</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T2.4.4.4.5\">56,998,101</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T2.4.4.7.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S6.T2.4.4.7.2.1\">FlashGS</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T2.4.4.7.2.2\">6.83 GB</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S6.T2.4.4.7.2.3\">6.83 GB</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T2.4.4.7.2.4\">3,436,142</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S6.T2.6.1.1\" style=\"font-size:90%;\">Table 2</span>. </span><span class=\"ltx_text\" id=\"S6.T2.7.2\" style=\"font-size:90%;\">Memory allocated before and after rendering the 800th frame in MatrixCity dataset.</span></figcaption>\n</figure>",
|
| 236 |
+
"capture": "Table 2. Memory allocated before and after rendering the 800th frame in MatrixCity dataset."
|
| 237 |
+
}
|
| 238 |
+
},
|
| 239 |
+
"image_paths": {
|
| 240 |
+
"1": {
|
| 241 |
+
"figure_path": "2408.07967v2_figure_1.png",
|
| 242 |
+
"caption": "Figure 1. Two representative rendering output images with 3D Gaussian Splatting (Kerbl et al., 2023) and our FlashGS.",
|
| 243 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/time.png"
|
| 244 |
+
},
|
| 245 |
+
"2": {
|
| 246 |
+
"figure_path": "2408.07967v2_figure_2.png",
|
| 247 |
+
"caption": "Figure 2. 3DGS Overview",
|
| 248 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/3DGS-Overview_4x.png"
|
| 249 |
+
},
|
| 250 |
+
"3": {
|
| 251 |
+
"figure_path": "2408.07967v2_figure_3.png",
|
| 252 |
+
"caption": "Figure 3. Runtime breakdown of 3GDS rasterization on the MatrixCity(Li et al., 2023) dataset.",
|
| 253 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/profile.png"
|
| 254 |
+
},
|
| 255 |
+
"4": {
|
| 256 |
+
"figure_path": "2408.07967v2_figure_4.png",
|
| 257 |
+
"caption": "Figure 4. We evaluate the key-value pairs binning process from the rendering process of 6 frames in the scene trained from MatrixCity(Li et al., 2023) dataset. The number of assigned k-v pairs is much more than the number of tiles really covered by the AABB or the projected ellipse.",
|
| 258 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/RedundancyBarchart_ZimuLiao.png"
|
| 259 |
+
},
|
| 260 |
+
"5": {
|
| 261 |
+
"figure_path": "2408.07967v2_figure_5.png",
|
| 262 |
+
"caption": "Figure 5. Geometry Redundancies. There are 3 kinds of redundancies in original 3DGS intersection algorithm: I. The definition of ellipse ignores the opacity. II. The AABB is over-estimated. III. The tiles out of the ellipse are binned with the Gaussian.",
|
| 263 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/Redundancy_5x.png"
|
| 264 |
+
},
|
| 265 |
+
"6": {
|
| 266 |
+
"figure_path": "2408.07967v2_figure_6.png",
|
| 267 |
+
"caption": "Figure 6. Opacity distribution in the smallcity scene of MatrixCity dataset. The horizontal axis below the bar shows percentage ranging from 0% to 100%. These percentages correspond to the opacity values from 0 to 1 shown above the bar.",
|
| 268 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/opa.png"
|
| 269 |
+
},
|
| 270 |
+
"7": {
|
| 271 |
+
"figure_path": "2408.07967v2_figure_7.png",
|
| 272 |
+
"caption": "Figure 7. FlashGS Overview.",
|
| 273 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/FlashGS_Overvie_4x.png"
|
| 274 |
+
},
|
| 275 |
+
"8": {
|
| 276 |
+
"figure_path": "2408.07967v2_figure_8.png",
|
| 277 |
+
"caption": "Figure 8. intersection tiles with the ellipse (purple represents the real intersection tiles, green shows the tiles each method treats as intersected, and white means not). (a) 3DGS uses AABB and gets 16 tiles. (b) GScore applies OBB and gets 8 tiles. (c) Precise intersection shows only 4 tiles.",
|
| 278 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/Intersection_tiles_4x.png"
|
| 279 |
+
},
|
| 280 |
+
"9": {
|
| 281 |
+
"figure_path": "2408.07967v2_figure_9.png",
|
| 282 |
+
"caption": "Figure 9. Geometric simplification for precise per-tile intersection. A tile is considered intersected if the segment of the ellipse intersecting the line of the tile\u2019s edge coincides with the edge.",
|
| 283 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/Intersection_ZimuLiao_5x.png"
|
| 284 |
+
},
|
| 285 |
+
"10": {
|
| 286 |
+
"figure_path": "2408.07967v2_figure_10.png",
|
| 287 |
+
"caption": "Figure 10. Schematic for the workflow of original 3DGS and the improved FlashGS. We balance the computation and memory access across various stages and reduce redundant operations (M_c and M_b are indicative of the amount of computation and memory access, respectively).",
|
| 288 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/workflow-240723.png"
|
| 289 |
+
},
|
| 290 |
+
"11": {
|
| 291 |
+
"figure_path": "2408.07967v2_figure_11.png",
|
| 292 |
+
"caption": "Figure 11. Adaptive task partitioning for Gaussian intersections with varying sizes. If a large ellipse requires processing multiple tiles, other threads within the warp are utilized to collaborate on the intersection.",
|
| 293 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/intersection-adaptive-240716.png"
|
| 294 |
+
},
|
| 295 |
+
"12": {
|
| 296 |
+
"figure_path": "2408.07967v2_figure_12.png",
|
| 297 |
+
"caption": "Figure 12. Software pipelining to achieve better overlap between computation and memory access.",
|
| 298 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/pipeline-240721.png"
|
| 299 |
+
},
|
| 300 |
+
"13": {
|
| 301 |
+
"figure_path": "2408.07967v2_figure_13.png",
|
| 302 |
+
"caption": "Figure 13. Rasterization runtime breakdown on 6 representative frames from different datasets, normalized to 3DGS.",
|
| 303 |
+
"url": "http://arxiv.org/html/2408.07967v2/x1.png"
|
| 304 |
+
},
|
| 305 |
+
"14": {
|
| 306 |
+
"figure_path": "2408.07967v2_figure_14.png",
|
| 307 |
+
"caption": "Figure 14. Profiling results of FlashGS: The number of instructions issued in rendering and the memory transactions in preprocessing. All results are normalized to 3DGS.",
|
| 308 |
+
"url": "http://arxiv.org/html/2408.07967v2/x2.png"
|
| 309 |
+
},
|
| 310 |
+
"15": {
|
| 311 |
+
"figure_path": "2408.07967v2_figure_15.png",
|
| 312 |
+
"caption": "Figure 15. Number of rendered Gaussian-tile (kv) pairs and instructions issued per pair of FlashGS (Normalized to 3DGS).",
|
| 313 |
+
"url": "http://arxiv.org/html/2408.07967v2/x3.png"
|
| 314 |
+
},
|
| 315 |
+
"16": {
|
| 316 |
+
"figure_path": "2408.07967v2_figure_16.png",
|
| 317 |
+
"caption": "Figure 16. The rendering quality and rasterization time on Matricity(1080p)-800 frame.",
|
| 318 |
+
"url": "http://arxiv.org/html/2408.07967v2/x4.png"
|
| 319 |
+
},
|
| 320 |
+
"17": {
|
| 321 |
+
"figure_path": "2408.07967v2_figure_17.png",
|
| 322 |
+
"caption": "Figure 17. Above is the 800th image (the longest one), below is the 809th image (the shortest one), the left is the baseline, and the right is the FlashGS (ours)",
|
| 323 |
+
"url": "http://arxiv.org/html/2408.07967v2/extracted/5799392/Figures/time.png"
|
| 324 |
+
}
|
| 325 |
+
},
|
| 326 |
+
"validation": true,
|
| 327 |
+
"references": [
|
| 328 |
+
{
|
| 329 |
+
"1": {
|
| 330 |
+
"title": "Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF international conference on computer vision. 5855\u20135864.",
|
| 331 |
+
"author": "Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. 2021.",
|
| 332 |
+
"venue": "",
|
| 333 |
+
"url": null
|
| 334 |
+
}
|
| 335 |
+
},
|
| 336 |
+
{
|
| 337 |
+
"2": {
|
| 338 |
+
"title": "A survey on 3d gaussian splatting.",
|
| 339 |
+
"author": "Guikun Chen and Wenguan Wang. 2024.",
|
| 340 |
+
"venue": "arXiv preprint arXiv:2401.03890 (2024).",
|
| 341 |
+
"url": null
|
| 342 |
+
}
|
| 343 |
+
},
|
| 344 |
+
{
|
| 345 |
+
"3": {
|
| 346 |
+
"title": "DoGaussian: Distributed-Oriented Gaussian Splatting for Large-Scale 3D Reconstruction Via Gaussian Consensus.",
|
| 347 |
+
"author": "Yu Chen and Gim Hee Lee. 2024.",
|
| 348 |
+
"venue": "arXiv preprint arXiv:2405.13943 (2024).",
|
| 349 |
+
"url": null
|
| 350 |
+
}
|
| 351 |
+
},
|
| 352 |
+
{
|
| 353 |
+
"4": {
|
| 354 |
+
"title": "Discrete-continuous optimization for large-scale structure from motion. In CVPR 2011. IEEE, 3001\u20133008.",
|
| 355 |
+
"author": "David Crandall, Andrew Owens, Noah Snavely, and Dan Huttenlocher. 2011.",
|
| 356 |
+
"venue": "",
|
| 357 |
+
"url": null
|
| 358 |
+
}
|
| 359 |
+
},
|
| 360 |
+
{
|
| 361 |
+
"5": {
|
| 362 |
+
"title": "Depth-supervised nerf: Fewer views and faster training for free. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 12882\u201312891.",
|
| 363 |
+
"author": "Kangle Deng, Andrew Liu, Jun-Yan Zhu, and Deva Ramanan. 2022.",
|
| 364 |
+
"venue": "",
|
| 365 |
+
"url": null
|
| 366 |
+
}
|
| 367 |
+
},
|
| 368 |
+
{
|
| 369 |
+
"6": {
|
| 370 |
+
"title": "DISTWAR: Fast Differentiable Rendering on Raster-based Rendering Pipelines.",
|
| 371 |
+
"author": "Sankeerth Durvasula, Adrian Zhao, Fan Chen, Ruofan Liang, Pawan Kumar Sanjaya, and Nandita Vijaykumar. 2023.",
|
| 372 |
+
"venue": "arXiv preprint arXiv:2401.05345 (2023).",
|
| 373 |
+
"url": null
|
| 374 |
+
}
|
| 375 |
+
},
|
| 376 |
+
{
|
| 377 |
+
"7": {
|
| 378 |
+
"title": "Lightgaussian: Unbounded 3d gaussian compression with 15x reduction and 200+ fps.",
|
| 379 |
+
"author": "Zhiwen Fan, Kevin Wang, Kairun Wen, Zehao Zhu, Dejia Xu, and Zhangyang Wang. 2023.",
|
| 380 |
+
"venue": "arXiv preprint arXiv:2311.17245 (2023).",
|
| 381 |
+
"url": null
|
| 382 |
+
}
|
| 383 |
+
},
|
| 384 |
+
{
|
| 385 |
+
"8": {
|
| 386 |
+
"title": "3d gaussian splatting as new era: A survey.",
|
| 387 |
+
"author": "Ben Fei, Jingyi Xu, Rui Zhang, Qingyuan Zhou, Weidong Yang, and Ying He. 2024.",
|
| 388 |
+
"venue": "IEEE Transactions on Visualization and Computer Graphics (2024).",
|
| 389 |
+
"url": null
|
| 390 |
+
}
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"9": {
|
| 394 |
+
"title": "Eagles: Efficient accelerated 3d gaussians with lightweight encodings.",
|
| 395 |
+
"author": "Sharath Girish, Kamal Gupta, and Abhinav Shrivastava. 2023.",
|
| 396 |
+
"venue": "arXiv preprint arXiv:2312.04564 (2023).",
|
| 397 |
+
"url": null
|
| 398 |
+
}
|
| 399 |
+
},
|
| 400 |
+
{
|
| 401 |
+
"10": {
|
| 402 |
+
"title": "Deep blending for free-viewpoint image-based rendering.",
|
| 403 |
+
"author": "Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. 2018.",
|
| 404 |
+
"venue": "ACM Transactions on Graphics (ToG) 37, 6 (2018), 1\u201315.",
|
| 405 |
+
"url": null
|
| 406 |
+
}
|
| 407 |
+
},
|
| 408 |
+
{
|
| 409 |
+
"11": {
|
| 410 |
+
"title": "Baking neural radiance fields for real-time view synthesis. In Proceedings of the IEEE/CVF international conference on computer vision. 5875\u20135884.",
|
| 411 |
+
"author": "Peter Hedman, Pratul P Srinivasan, Ben Mildenhall, Jonathan T Barron, and Paul Debevec. 2021.",
|
| 412 |
+
"venue": "",
|
| 413 |
+
"url": null
|
| 414 |
+
}
|
| 415 |
+
},
|
| 416 |
+
{
|
| 417 |
+
"12": {
|
| 418 |
+
"title": "Efficientnerf efficient neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 12902\u201312911.",
|
| 419 |
+
"author": "Tao Hu, Shu Liu, Yilun Chen, Tiancheng Shen, and Jiaya Jia. 2022.",
|
| 420 |
+
"venue": "",
|
| 421 |
+
"url": null
|
| 422 |
+
}
|
| 423 |
+
},
|
| 424 |
+
{
|
| 425 |
+
"13": {
|
| 426 |
+
"title": "3D Gaussian Splatting for Real-Time Radiance Field Rendering.",
|
| 427 |
+
"author": "Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuehler, and George Drettakis. 2023.",
|
| 428 |
+
"venue": "ACM Transactions on Graphics (TOG) 42 (2023), 1\u201314.",
|
| 429 |
+
"url": null
|
| 430 |
+
}
|
| 431 |
+
},
|
| 432 |
+
{
|
| 433 |
+
"14": {
|
| 434 |
+
"title": "Tanks and temples: Benchmarking large-scale scene reconstruction.",
|
| 435 |
+
"author": "Arno Knapitsch, Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. 2017.",
|
| 436 |
+
"venue": "ACM Transactions on Graphics (ToG) 36, 4 (2017), 1\u201313.",
|
| 437 |
+
"url": null
|
| 438 |
+
}
|
| 439 |
+
},
|
| 440 |
+
{
|
| 441 |
+
"15": {
|
| 442 |
+
"title": "GSCore: Efficient Radiance Field Rendering via Architectural Support for 3D Gaussian Splatting. In Proceedings of the 29th ACM International Conference on Architectural Support for Programming Languages and Operating Systems, Volume 3. 497\u2013511.",
|
| 443 |
+
"author": "Junseo Lee, Seokwon Lee, Jungi Lee, Junyong Park, and Jaewoong Sim. 2024.",
|
| 444 |
+
"venue": "",
|
| 445 |
+
"url": null
|
| 446 |
+
}
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"16": {
|
| 450 |
+
"title": "Matrixcity: A large-scale city dataset for city-scale neural rendering and beyond. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 3205\u20133215.",
|
| 451 |
+
"author": "Yixuan Li, Lihan Jiang, Linning Xu, Yuanbo Xiangli, Zhenzhi Wang, Dahua Lin, and Bo Dai. 2023.",
|
| 452 |
+
"venue": "",
|
| 453 |
+
"url": null
|
| 454 |
+
}
|
| 455 |
+
},
|
| 456 |
+
{
|
| 457 |
+
"17": {
|
| 458 |
+
"title": "Neural sparse voxel fields.",
|
| 459 |
+
"author": "Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. 2020.",
|
| 460 |
+
"venue": "Advances in Neural Information Processing Systems 33 (2020), 15651\u201315663.",
|
| 461 |
+
"url": null
|
| 462 |
+
}
|
| 463 |
+
},
|
| 464 |
+
{
|
| 465 |
+
"18": {
|
| 466 |
+
"title": "Scaffold-gs: Structured 3d gaussians for view-adaptive rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 20654\u201320664.",
|
| 467 |
+
"author": "Tao Lu, Mulin Yu, Linning Xu, Yuanbo Xiangli, Limin Wang, Dahua Lin, and Bo Dai. 2024.",
|
| 468 |
+
"venue": "",
|
| 469 |
+
"url": null
|
| 470 |
+
}
|
| 471 |
+
},
|
| 472 |
+
{
|
| 473 |
+
"19": {
|
| 474 |
+
"title": "NeRF: Representing scenes as neural radiance fields for view synthesis.",
|
| 475 |
+
"author": "Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. 2021.",
|
| 476 |
+
"venue": "Commun. ACM 65, 1 (2021), 99\u2013106.",
|
| 477 |
+
"url": null
|
| 478 |
+
}
|
| 479 |
+
},
|
| 480 |
+
{
|
| 481 |
+
"20": {
|
| 482 |
+
"title": "Instant neural graphics primitives with a multiresolution hash encoding.",
|
| 483 |
+
"author": "Thomas M\u00fcller, Alex Evans, Christoph Schied, and Alexander Keller. 2022.",
|
| 484 |
+
"venue": "ACM transactions on graphics (TOG) 41, 4 (2022), 1\u201315.",
|
| 485 |
+
"url": null
|
| 486 |
+
}
|
| 487 |
+
},
|
| 488 |
+
{
|
| 489 |
+
"21": {
|
| 490 |
+
"title": "Mega-nerf: Scalable construction of large-scale nerfs for virtual fly-throughs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 12922\u201312931.",
|
| 491 |
+
"author": "Haithem Turki, Deva Ramanan, and Mahadev Satyanarayanan. 2022.",
|
| 492 |
+
"venue": "",
|
| 493 |
+
"url": null
|
| 494 |
+
}
|
| 495 |
+
},
|
| 496 |
+
{
|
| 497 |
+
"22": {
|
| 498 |
+
"title": "Fourier plenoctrees for dynamic radiance field rendering in real-time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 13524\u201313534.",
|
| 499 |
+
"author": "Liao Wang, Jiakai Zhang, Xinhang Liu, Fuqiang Zhao, Yanshun Zhang, Yingliang Zhang, Minye Wu, Jingyi Yu, and Lan Xu. 2022.",
|
| 500 |
+
"venue": "",
|
| 501 |
+
"url": null
|
| 502 |
+
}
|
| 503 |
+
},
|
| 504 |
+
{
|
| 505 |
+
"23": {
|
| 506 |
+
"title": "Nerfingmvs: Guided optimization of neural radiance fields for indoor multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 5610\u20135619.",
|
| 507 |
+
"author": "Yi Wei, Shaohui Liu, Yongming Rao, Wang Zhao, Jiwen Lu, and Jie Zhou. 2021.",
|
| 508 |
+
"venue": "",
|
| 509 |
+
"url": null
|
| 510 |
+
}
|
| 511 |
+
},
|
| 512 |
+
{
|
| 513 |
+
"24": {
|
| 514 |
+
"title": "Diver: Real-time and accurate neural radiance fields with deterministic integration for volume rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 16200\u201316209.",
|
| 515 |
+
"author": "Liwen Wu, Jae Yong Lee, Anand Bhattad, Yu-Xiong Wang, and David Forsyth. 2022.",
|
| 516 |
+
"venue": "",
|
| 517 |
+
"url": null
|
| 518 |
+
}
|
| 519 |
+
},
|
| 520 |
+
{
|
| 521 |
+
"25": {
|
| 522 |
+
"title": "Recent advances in 3d gaussian splatting.",
|
| 523 |
+
"author": "Tong Wu, Yu-Jie Yuan, Ling-Xiao Zhang, Jie Yang, Yan-Pei Cao, Ling-Qi Yan, and Lin Gao. 2024.",
|
| 524 |
+
"venue": "Computational Visual Media (2024), 1\u201330.",
|
| 525 |
+
"url": null
|
| 526 |
+
}
|
| 527 |
+
},
|
| 528 |
+
{
|
| 529 |
+
"26": {
|
| 530 |
+
"title": "Point-nerf: Point-based neural radiance fields. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 5438\u20135448.",
|
| 531 |
+
"author": "Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. 2022.",
|
| 532 |
+
"venue": "",
|
| 533 |
+
"url": null
|
| 534 |
+
}
|
| 535 |
+
},
|
| 536 |
+
{
|
| 537 |
+
"27": {
|
| 538 |
+
"title": "Multi-scale 3d gaussian splatting for anti-aliased rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 20923\u201320931.",
|
| 539 |
+
"author": "Zhiwen Yan, Weng Fei Low, Yu Chen, and Gim Hee Lee. 2024.",
|
| 540 |
+
"venue": "",
|
| 541 |
+
"url": null
|
| 542 |
+
}
|
| 543 |
+
},
|
| 544 |
+
{
|
| 545 |
+
"28": {
|
| 546 |
+
"title": "Mathematical Supplement for the gsplat Library.",
|
| 547 |
+
"author": "Vickie Ye and Angjoo Kanazawa. 2023.",
|
| 548 |
+
"venue": "",
|
| 549 |
+
"url": null
|
| 550 |
+
}
|
| 551 |
+
},
|
| 552 |
+
{
|
| 553 |
+
"29": {
|
| 554 |
+
"title": "Plenoctrees for real-time rendering of neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 5752\u20135761.",
|
| 555 |
+
"author": "Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. 2021.",
|
| 556 |
+
"venue": "",
|
| 557 |
+
"url": null
|
| 558 |
+
}
|
| 559 |
+
},
|
| 560 |
+
{
|
| 561 |
+
"30": {
|
| 562 |
+
"title": "Fregs: 3d gaussian splatting with progressive frequency regularization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 21424\u201321433.",
|
| 563 |
+
"author": "Jiahui Zhang, Fangneng Zhan, Muyu Xu, Shijian Lu, and Eric Xing. 2024.",
|
| 564 |
+
"venue": "",
|
| 565 |
+
"url": null
|
| 566 |
+
}
|
| 567 |
+
},
|
| 568 |
+
{
|
| 569 |
+
"31": {
|
| 570 |
+
"title": "On Scaling Up 3D Gaussian Splatting Training.",
|
| 571 |
+
"author": "Hexu Zhao, Haoyang Weng, Daohan Lu, Ang Li, Jinyang Li, Aurojit Panda, and Saining Xie. 2024.",
|
| 572 |
+
"venue": "arXiv preprint arXiv:2406.18533 (2024).",
|
| 573 |
+
"url": null
|
| 574 |
+
}
|
| 575 |
+
}
|
| 576 |
+
],
|
| 577 |
+
"url": "http://arxiv.org/html/2408.07967v2"
|
| 578 |
+
}
|
20240819/2408.08673v2.json
ADDED
|
@@ -0,0 +1,154 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "MAT-SED: A Masked Audio Transformer with Masked-Reconstruction Based Pre-training for Sound Event Detection",
|
| 3 |
+
"abstract": "Sound event detection (SED) methods that leverage a large pre-trained Transformer encoder network have shown promising performance in recent DCASE challenges.\nHowever, they still rely on an RNN-based context network to model temporal dependencies, largely due to the scarcity of labeled data.\nIn this work, we propose a pure Transformer-based SED model with masked-reconstruction based pre-training, termed MAT-SED.\nSpecifically, a Transformer with relative positional encoding is first designed as the context network, pre-trained by the\nmasked-reconstruction task on all available target data in a self-supervised way.\nBoth the encoder and the context network are jointly fine-tuned in a semi-supervised manner. Furthermore, a global-local feature fusion strategy is proposed to enhance the localization capability.\nEvaluation of MAT-SED on DCASE2023 task4 surpasses state-of-the-art performance, achieving 0.587/0.896 PSDS1/PSDS2 respectively.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Sound event detection (SED) aims to recognize not only what events are happening in an audio signal but also when those events are happening.\nRecent research in this field has garnered increasing interest from both academic and industrial sectors.\nThe DCASE challenges 111https://dcase.community/challenge2023/ ###reference_### have been conducted to evaluate the performance of systems in environmental sound classification and detection, significantly driving the advancement of SED research.\nThis technology is widely used in various applications, such as smart homes [1 ###reference_b1###], smart city [2 ###reference_b2###], surveillance [3 ###reference_b3###], etc.\nMost recent SED architecture can generally be divided into an encoder network and a context network, as illustrated in Figure 1 ###reference_###.\nIn classical CRNN based SED systems [4 ###reference_b4###], convolutional neural networks (CNNs) are used as the encoder network for feature extraction, while recurrent neural networks (RNNs) are employed as the context network to model temporal dependencies across latent features from the encoder.\nThe scarcity of labeled data is always a significant challenge for the SED task, due to the high cost of strong annotation for sound events.\nSemi-supervised methods, such as mean-teacher [5 ###reference_b5###], have thus been introduced to utilize large amounts of unlabeled data to mitigate the impact of insufficient labeled data.\n###figure_1### Recently, Transformer-based SED models have surged in popularity, inspired by the successes of Transformers in various domains, including natural language processing [6 ###reference_b6###, 7 ###reference_b7###], computer vision [8 ###reference_b8###] and automatic speech recognition [9 ###reference_b9###, 10 ###reference_b10###].\nConvolution-augmented Transformer [11 ###reference_b11###] utilizes Conformer [10 ###reference_b10###] instead of RNN to model temporal dependencies, winning the first place in DCASE2020 Task 4.\nThat work demonstrated the potential of Transformer-based structures for SED, though performance was still limited due to insufficient labeled data.\nTo mitigate the problem of data scarcity, a widely used approach is to employ Transformer models pre-trained on readily available large-scale audio tagging datasets to serve as powerful feature extractors.\nAmong high-ranking models [12 ###reference_b12###, 13 ###reference_b13###] of DCASE2023, the pre-trained Transformer and the CNN are concatenated in parallel as the encoder network, which can take the advantages of global and local features from different encoders.\nHowever, it is worth noting that most of those works only applied Transformer structures partially to the traditional CRNN.\nAgain this is due to data scarcity issues.\nFurthermore, although powerful encoder networks can be obtained by pre-training, it is still difficult to train the downstream context network with limited labeled data.\nThis remains a challenge to apply the pure Transformer-based structure for the SED task.\nIn this work, we present a pure Transformer-based SED model, termed Masked Audio Transformer for Sound Event Detection (MAT-SED).\nMAT-SED begins with the pre-trained Transformer model as an encoder network, then a Transformer with relative positional encoding instead of RNNs as the context network, which can better capture long-range context dependencies of latent features.\nThe Transformer structures lack some of the inductive biases inherent to RNNs, such as sequentiality, which 
prevents Transformer-based context networks from generalizing well when trained on insufficient data.\nTo address this problem, we use the masked-reconstruction task to pre-train the context network in a self-supervised manner, then fine-tune the pre-trained model with the classical mean-teacher algorithm.\nThis training paradigm maximizes the utilization of large quantities of unlabeled data compared to pure semi-supervised learning.\nThe global-local feature fusion strategy is employed to enhance the model\u2019s localization accuracy in the fine-tuning stage.\nExperimental results on the DCASE2023 dataset show that the proposed MAT-SED achieves 0.587/0.896 PSDS1/PSDS2, surpassing state-of-the-art SED systems, thus demonstrating the potential of our approach.\n 222The code is available at\nhttps://github.com/cai525/Transformer4SED ###reference_###"
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Methodology",
|
| 15 |
+
"text": "In this section, we first outline the model structure of MAT-SED, then introduce the masked-reconstruction based pre-training and the fine-tuning strategies."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Model",
|
| 21 |
+
"text": "The overall structure of MAT-SED, as shown in Figure 1 ###reference_###, consists of two main components: the encoder network and the context network.\nThe encoder network is used to extract features from the mel-spectrogram, outputting latent feature sequences.\nThe context network is responsible for capturing temporal dependencies across the latent features.\nDifferent types of head layer follow the context network to handle specific tasks, such as reconstruction, audio tagging and SED."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.1.1",
|
| 25 |
+
"parent_section_id": "2.1",
|
| 26 |
+
"section_name": "2.1.1 Encoder network",
|
| 27 |
+
"text": "The encoder network of MAT-SED is based on PaSST [14 ###reference_b14###], a large pre-trained Transformer model for audio tagging.\nEach mel-spectrogram is divided into several patches, then patches are projected linearly to a sequence of embeddings.\nThe sequence traverses through 10 layers of PaSST blocks consisted of Transformers.\nFollowing PaSST, the frequency dimension is compressed via average pooling, succeeded by 10 times linear upsampling to restore the temporal resolution lost during the patching process.\nThe output of the encoder network is denoted as , where C is the dimension of the embedding vector, and T is the length of encoder\u2019s output in the time dimension."
|
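To make the shape bookkeeping above concrete, here is a minimal PyTorch sketch of the encoder's data flow (patch embedding, Transformer blocks, frequency pooling, 10x temporal upsampling). It is illustrative only: the layer counts follow the text, but it is not the actual PaSST implementation, and the patch size is an assumption.

```python
import torch
import torch.nn as nn

class EncoderSketch(nn.Module):
    """Sketch of the encoder's tensor shapes, not the real PaSST code."""
    def __init__(self, embed_dim=768, patch=(16, 16)):
        super().__init__()
        # Non-overlapping patches, ViT-style (patch size is an assumption).
        self.patch_embed = nn.Conv2d(1, embed_dim, kernel_size=patch, stride=patch)
        self.blocks = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(embed_dim, nhead=12, batch_first=True),
            num_layers=10,
        )
        self.upsample = nn.Upsample(scale_factor=10, mode="linear")

    def forward(self, spec):                 # spec: (B, 1, n_mels, T_spec)
        x = self.patch_embed(spec)           # (B, C, F_p, T_p)
        b, c, f, t = x.shape
        x = x.flatten(2).transpose(1, 2)     # (B, F_p*T_p, C) token sequence
        x = self.blocks(x)
        x = x.transpose(1, 2).reshape(b, c, f, t)
        x = x.mean(dim=2)                    # average-pool frequency: (B, C, T_p)
        return self.upsample(x)              # (B, C, 10*T_p): latent sequence

latents = EncoderSketch()(torch.randn(2, 1, 128, 160))  # -> (2, 768, 100)
```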
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.1.2",
|
| 31 |
+
"parent_section_id": "2.1",
|
| 32 |
+
"section_name": "2.1.2 Context network",
|
| 33 |
+
"text": "Instead of the conventional RNN structure, we utilize 3 layers of Transformer block to constitute the context network.\nGiven the crucial need for localization in the SED task, integrating positional information becomes vital.\nWhile RNN structures naturally embed positional information along the time dimension through their sequential structure [15 ###reference_b15###], Transformer models require positional encoding for the same purpose.\nThe vanilla Transformer uses the absolute positional encoding (APE) [6 ###reference_b6###, 7 ###reference_b7###], where the positional encoding depends on absolute position of tokens.\nBut for a given sound event, we hope that the model is translation equivariant along time dimension, i.e. when the time of a sound event in an audio signal is changed, the same features will be detected at the new time.\nWe therefore use relative positional encoding (RPE) [16 ###reference_b16###] to achieve this purpose, where the learnable positional encoding is determined by the relative position between frames.\nCompared to learnable APE, the RPE is naturally translation-equivariant [17 ###reference_b17###], making it more suitable for modelling temporal dependencies.\n###figure_2###"
|
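As an illustration of the relative positional encoding described above, the following is a minimal self-attention sketch with a learnable bias indexed by the relative offset i - j; because the bias depends only on i - j, shifting an event in time shifts the attention pattern with it. The clipping range and dimensions are assumptions for the sketch, not the exact MAT-SED code.

```python
import torch
import torch.nn as nn

class RelPosSelfAttention(nn.Module):
    """Self-attention with a learnable bias per head and relative offset."""
    def __init__(self, dim=768, heads=12, max_rel_dist=128):
        super().__init__()
        self.h, self.d, self.max_rel = heads, dim // heads, max_rel_dist
        self.qkv = nn.Linear(dim, 3 * dim)
        self.proj = nn.Linear(dim, dim)
        # One learnable scalar per head and per clipped relative offset.
        self.rel_bias = nn.Parameter(torch.zeros(heads, 2 * max_rel_dist - 1))

    def forward(self, x):                               # x: (B, T, dim)
        B, T, _ = x.shape
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        q = q.view(B, T, self.h, self.d).transpose(1, 2)  # (B, h, T, d)
        k = k.view(B, T, self.h, self.d).transpose(1, 2)
        v = v.view(B, T, self.h, self.d).transpose(1, 2)
        idx = torch.arange(T, device=x.device)
        # Relative offsets i - j, clipped to the learned range.
        rel = (idx[:, None] - idx[None, :]).clamp(-self.max_rel + 1, self.max_rel - 1)
        bias = self.rel_bias[:, rel + self.max_rel - 1]   # (h, T, T)
        attn = (q @ k.transpose(-2, -1)) / self.d**0.5 + bias
        out = (attn.softmax(dim=-1) @ v).transpose(1, 2).reshape(B, T, -1)
        return self.proj(out)

y = RelPosSelfAttention()(torch.randn(2, 100, 768))     # -> (2, 100, 768)
```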
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "2.2",
|
| 37 |
+
"parent_section_id": "2",
|
| 38 |
+
"section_name": "Masked-reconstruction based pre-training",
|
| 39 |
+
"text": "The model structure during pre-training is depicted in Figure 1 ###reference_### (a).\nAt this stage, we initialize the encoder network using the PaSST model pre-trained on AudioSet [18 ###reference_b18###] and freeze its weights, to focus on pre-training the context network.\nWe design the masked-reconstruction task as the pretext task, similar to train a masked language model.\nWe mask a certain proportion of frames in the latent feature sequence , and substitute the masked frames with the learnable mask token, obtaining a new sequence .\nThe masked-reconstruction task requires the context network to restore the masked latent features using the contextual information, which helps to enhance the temporal modeling ability of the context network.\nFor the masking strategy, we adopt the block-wise masking strategy used in [19 ###reference_b19###], dividing the sequence into several blocks of size 10, and masking entire blocks randomly.\nCompared to random masking, the block-wise masking strategy increases the difficulty of reconstruction, thus forces the model to learn more abstract semantic information.\nThe masked sequence traverses through the context network and the reconstruction head composed of two fully connected layers, yielding the reconstructed sequence .We use mean squared error (MSE) loss to evaluate the quality of reconstruction:\nwhere denotes the pre-training dataset, and denotes the set of masked frame indices corresponding to the sample .\nNote from this that only the masked frames are used to calculate the reconstruction loss."
|
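A minimal sketch of the block-wise masking and masked-frame MSE loss described above, assuming a frozen encoder that yields (B, T, C) latents. The helper names are hypothetical, and sharing one mask across the batch is a simplification for brevity.

```python
import torch
import torch.nn.functional as F

def block_mask(T, block=10, ratio=0.75, device="cpu"):
    """Split T frames into blocks of `block` frames and mask whole blocks
    at random until roughly `ratio` of the frames are masked."""
    n_blocks = T // block
    chosen = torch.randperm(n_blocks, device=device)[: int(n_blocks * ratio)]
    mask = torch.zeros(T, dtype=torch.bool, device=device)
    for b in chosen:
        mask[b * block:(b + 1) * block] = True
    return mask                                    # True = masked frame

def masked_reconstruction_loss(latents, context_net, mask_token):
    """latents: (B, T, C) frozen-encoder features; mask_token: learnable (C,)."""
    B, T, C = latents.shape
    mask = block_mask(T, device=latents.device)
    corrupted = latents.clone()
    corrupted[:, mask] = mask_token                # replace masked frames
    recon = context_net(corrupted)                 # (B, T, C) reconstruction
    # MSE on the masked frames only, as the loss description above specifies.
    return F.mse_loss(recon[:, mask], latents[:, mask])

# Usage sketch: any (B, T, C) -> (B, T, C) module can stand in for the context net.
net = torch.nn.Sequential(torch.nn.Linear(768, 768))
token = torch.nn.Parameter(torch.zeros(768))
loss = masked_reconstruction_loss(torch.randn(2, 100, 768), net, token)
```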
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "2.3",
|
| 43 |
+
"parent_section_id": "2",
|
| 44 |
+
"section_name": "Fine-tuning",
|
| 45 |
+
"text": "The model structure in the fine-tuning stage is shown in Figure 1 ###reference_### (b).\nDuring fine-tuning, the reconstruction head is replaced by the SED head composed of a fully connected layer, which outputs the frame-level prediction.\nThe frame-level prediction is pooled over the time dimension by linear-softmax pooling [22 ###reference_b22###], to obtain the clip-level prediction result.\nFollowing the task-aware module in [20 ###reference_b20###], we additionally set up an AT head to focuse on the audio tagging task.\nThe mean-teacher algorithm [5 ###reference_b5###] is used for semi-supervised learning, with the consistency weight of 40.\nPrevious studies [23 ###reference_b23###, 24 ###reference_b24###] have shown that using the window mechanism to limit the input duration can constrain the model to better attend to local information, thereby enhancing localization accuracy.\nWe thus propose a novel strategy for fine-tuning, termed global-local feature fusion strategy, as depicted in Figure 2 ###reference_###.\nThis strategy uses two branches to extract different features from the spectrogram.\nThe global branch feeds the original spectrogram into the encoder network, yielding the global feature sequence . In the local branch, the spectrogram is split into several overlapping chunks along the time dimension by sliding windows. Each chunk is then independently fed into the encoder network for feature extraction, and the output features from different chunks are aggregated to form the local feature sequence . Since there is temporal overlap between nearby chunks, we average the features extracted from different chunks for the overlapping duration. Global features and local features are fused linearly to obtain fused features :\nIn our model, is set to 0.5 so fused features combine both local and global characteristics. It thus works well for sound events of varying duration.\nSince the overlap between chunks increases the memory consumption significantly , the feature fusion strategy is only used in the teacher model of the mean-teacher algorithm, which does not require back-propagation."
|
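A sketch of the global-local feature fusion under stated assumptions: `encoder` is any callable mapping a spectrogram to (B, C, T) latents, the window and step are given in spectrogram frames (5 s and 0.3 s in the paper's setup), and the fusion weight is 0.5 as in the text. The toy encoder in the usage line is purely illustrative.

```python
import torch
import torch.nn.functional as F

def global_local_fusion(spec, encoder, win, step, alpha=0.5):
    """spec: (B, 1, F, T_spec); encoder maps a spectrogram to (B, C, T)
    latents, with T proportional to T_spec."""
    g = encoder(spec)                                   # global branch: (B, C, T)
    local_sum = torch.zeros_like(g)
    counts = torch.zeros(g.shape[-1])
    scale = g.shape[-1] / spec.shape[-1]                # latent frames per spec frame
    for s in range(0, spec.shape[-1] - win + 1, step):  # overlapping chunks
        chunk = encoder(spec[..., s:s + win])           # (B, C, T_chunk)
        lo = int(s * scale)
        hi = min(lo + chunk.shape[-1], g.shape[-1])
        local_sum[..., lo:hi] += chunk[..., :hi - lo]
        counts[lo:hi] += 1
    local = local_sum / counts.clamp(min=1)             # average overlapping chunks
    return alpha * g + (1 - alpha) * local              # linear fusion (alpha = 0.5)

# Toy usage: an "encoder" that pools frequency away and downsamples time by 5.
enc = lambda x: F.avg_pool2d(x, (x.shape[2], 5)).squeeze(2)
fused = global_local_fusion(torch.randn(2, 1, 128, 1000), enc, win=500, step=100)
```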
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Experimental Setup",
|
| 51 |
+
"text": ""
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.1",
|
| 55 |
+
"parent_section_id": "3",
|
| 56 |
+
"section_name": "Dataset",
|
| 57 |
+
"text": "The self-supervised pre-training and fine-tuning are both conducted on the DCASE2023 [25 ###reference_b25###] dataset, which is designed to detect sound event classes in domestic environments.\nThe training set consists of 10-second audio clips, including 1578 weakly-labeled clips, 3470 strongly-labeled clips, 10000 synthetic-strongly labeled clips, and 14412 unlabeled in-domain clips. The model is evaluated on the DCASE2023 challenge task 4 validation set, consisting of 1168 strongly-labeled clips."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "3.2",
|
| 61 |
+
"parent_section_id": "3",
|
| 62 |
+
"section_name": "Feature extraction and evaluation setting",
|
| 63 |
+
"text": "The input audio is sampled at 32kHz. For feature extraction, we use a Hamming window of 25ms with a stride of 10ms to perform short-time Fourier transform(STFT). The spectrum obtained by the STFT is further transformed into a mel-spectrogram with 128 mel filters. Mixup [26 ###reference_b26###], time shift and filterAugment [27 ###reference_b27###] are used for data augmentation.\nThe polyphonic sound detection score (PSDS) [28 ###reference_b28###] is used as the evaluation metric.\nFollowing the setting of DCASE2023 competition, we use two different metric settings, PSDS1 and PSDS2, for two different scenarios.\nThe former focuses more on event localization, while the latter aims to avoid confusing between classes but for which localization is less crucial.\nSince PSDS1 can better reflect the model\u2019s localization performance, we use PSDS1 as the main evaluation metric in our experiments.\nIn the testing phase, median filter and maximum filter are applied to the two PSDS scenarios respectively for post-processing [29 ###reference_b29###]."
|
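The feature-extraction settings above translate directly into, e.g., a torchaudio transform. torchaudio and the n_fft choice are assumptions here, since the extracted text does not name a library.

```python
import torch
import torchaudio

# Settings from the text: 32 kHz audio, 25 ms Hamming window, 10 ms hop, 128 mels.
sample_rate = 32000
mel = torchaudio.transforms.MelSpectrogram(
    sample_rate=sample_rate,
    n_fft=1024,                            # >= 800-sample window (assumption)
    win_length=int(0.025 * sample_rate),   # 800 samples = 25 ms window
    hop_length=int(0.010 * sample_rate),   # 320 samples = 10 ms stride
    window_fn=torch.hamming_window,
    n_mels=128,
)
clip = torch.randn(1, 10 * sample_rate)    # one 10-second clip
spec = mel(clip)                           # -> (1, 128, 1001) mel-spectrogram
```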
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "3.3",
|
| 67 |
+
"parent_section_id": "3",
|
| 68 |
+
"section_name": "Model and training setting",
|
| 69 |
+
"text": "For the sliding windows in the global-local feature fusion strategy, the window size and step are set to 5s and 0.3s. The context network contains 3 Transformer blocks with input dimension 768 , 12 attention heads, and expansion ratio 1.\nDuring the pre-training phase, the model is trained over 6000 steps with a batch size of 24 and a learning rate of .\nFor the masked-reconstruction task, the masking rate is set to 75%. During the fine-tuning stage, batch sizes for real strongly labeled, synthetic strongly labeled, real weakly labeled, and real unlabeled data are set to 3, 1, 4, 4, respectively.\nFollowing the strategy in [30 ###reference_b30###], only the SED head and AT head are trained for the first 6000 steps of fine-tuning, then the end-to-end fine-tuning is performed over the next 12000 steps.\nLearning rates for the encoder network, decoder network, and head layers are set to , , and , respectively.\nThe AdamW [31 ###reference_b31###] optimizer is used for optimization with a weight decay of . Training is conducted on 2 Intel-3090 GPUs for 13 hours in total."
|
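A sketch of the two-stage fine-tuning schedule with per-module learning rates via AdamW parameter groups. The module names and the concrete learning-rate/weight-decay values are placeholders, since the exact values are elided in this extraction.

```python
import torch

# Stand-in modules; the real encoder/context/heads are far larger.
model = torch.nn.ModuleDict({
    "encoder": torch.nn.Linear(768, 768),
    "context": torch.nn.Linear(768, 768),
    "heads":   torch.nn.Linear(768, 10),
})
optimizer = torch.optim.AdamW(
    [
        {"params": model["encoder"].parameters(), "lr": 1e-5},  # placeholder LR
        {"params": model["context"].parameters(), "lr": 1e-4},  # placeholder LR
        {"params": model["heads"].parameters(),   "lr": 1e-3},  # placeholder LR
    ],
    weight_decay=1e-2,  # placeholder; the extracted text elides the value
)

# Phase 1 (first 6000 steps): freeze encoder and context, train heads only.
model["encoder"].requires_grad_(False)
model["context"].requires_grad_(False)
# ... 6000 optimizer steps on the SED/AT heads ...

# Phase 2 (next 12000 steps): unfreeze everything for end-to-end fine-tuning.
model["encoder"].requires_grad_(True)
model["context"].requires_grad_(True)
# ... 12000 end-to-end steps with the mean-teacher consistency loss ...
```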
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "Results",
|
| 75 |
+
"text": "In this section, we first compare the performance of MAT-SED against other state-of-the-art SED models.\nThen, we conduct ablation experiments to analyze the contributions of each MAT-SED component."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.1",
|
| 79 |
+
"parent_section_id": "4",
|
| 80 |
+
"section_name": "Performance of the proposed methods",
|
| 81 |
+
"text": "Table 1 ###reference_### compares the performance of MAT-SED with other SED systems on the DCASE2023 dataset, where CRNN-BEATs is the baseline model of DCASE2023 task4.\nOur model achieves 0.587 PSDS1 and 0.896 PSDS2, outperforming previous SOTA models. It is noteworthy that MAT-SED stands out as the only model composed of pure Transformers in the table, whereas other models rely on CNN or RNN structures.\nThis shows that the pure Transformer structure can perform well on SED tasks, given appropriate pre-training."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "4.2",
|
| 85 |
+
"parent_section_id": "4",
|
| 86 |
+
"section_name": "Ablation studies",
|
| 87 |
+
"text": ""
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "4.2.1",
|
| 91 |
+
"parent_section_id": "4.2",
|
| 92 |
+
"section_name": "4.2.1 Ablations of the context network",
|
| 93 |
+
"text": "First, we explore the impact of different context network structures, as shown in Table 2 ###reference_###. Masked-reconstruction pre-training is employed in each set of experiments, and the hyperparameters of different structures are adjusted to the best. We use learnable APE in place of RPE to measure the effect of RPE. It can be seen from the table that the PSDS1 score of using RPE is significantly higher than APE, which indicates that the necessity of RPE for the SED task. Then we test the performance of Conformer [10 ###reference_b10###] for the context network. It can be seen from the table that Conformer achieves a PSDS1 of 0.544, trailing behind the Transformer using RPE, even though RPE is also utilized by Conformer. We suppose that the possible reason is that the convolution module in Conformer increases the parameter size, which makes it too bulky for the context network. Lastly, we substitute Transformers with GRU to compare the performance of RNNs and Transformers as the context network. The GRU achieves the PSDS1 of 0.557, lower than the Transformer using RPE, indicating that the Transformer with RPE serves as a more powerful context network structure than RNNs.\n###figure_3###"
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "4.2.2",
|
| 97 |
+
"parent_section_id": "4.2",
|
| 98 |
+
"section_name": "4.2.2 Ablations of masked-reconstruction based pre-training",
|
| 99 |
+
"text": "In this section, we analyze the effect of the masked-reconstruction pre-training. Figure 3 ###reference_### compares the convergence curves of training MAT-SED from scratch and end-to-end fine-tuning after masked-reconstruction pre-training. For the pre-trained model, the SED layer and AT layer are trained before the end-to-end fine-tuning to adjust to the features from pre-trained context network. The pre-trained network achieves a PSDS1 of 0.502 at the beginning of end-to-end fine-tuning, even higher than the DCASE2023 baseline model, indicating that the representation learned by the context network in the masked-reconstruction pre-training is well-suited for the SED task. During the subsequent end-to-end fine-tuning process, the optimal PSDS1 score for the network without pre-training is 0.563, noticeably lower than the pre-training network. On the other hand, severe overfitting occurs in the network without pre-training , which is not apparent in the pre-trained network. The results shows the efficacy of masked-reconstruction pre-training in enhancing Transformer-based context network\u2019s ability to model temporal dependencies, thus benefiting the localization of sound events.\nFigure 4 ###reference_### further compares the effect of masking ratio in the masked-reconstruction pre-training. It can be seen from the figure that the optimal masking ratio is 75%, relatively higher compared to Bert (15%) [7 ###reference_b7###]. A high masking ratio helps the model to learn abstract semantic features, rather than restoring the masked frames by simply interpolation. Similar conclusions have also been found in other self-supervised learning works based on masking [30 ###reference_b30###, 32 ###reference_b32###].\n###figure_4###"
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "4.2.3",
|
| 103 |
+
"parent_section_id": "4.2",
|
| 104 |
+
"section_name": "4.2.3 Ablations of the global-local feature fusion strategy",
|
| 105 |
+
"text": "In this section, we analyze the effect of the global-local feature fusion strategy in the fine-tuning stage.\nIn the feature fusion strategy, the hyperparameter controls the proportion of the global and local branches in the fused features. When , only local features are retained; when , only global features are retained, which means that the sliding window mechanism no longer works. In Table 3 ###reference_###, we compare the PSDS1 scores when is set to 0, 0.5 and 1. The experimental results show that higher PSDS1 is achieved when than the cases when is set to 0 or 1, indicating that fusing the global and local features can obtain more powerful latent features than only relying on either side."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "5",
|
| 109 |
+
"parent_section_id": null,
|
| 110 |
+
"section_name": "Conclusion",
|
| 111 |
+
"text": "In this paper, we propose MAT-SED, a pure Transformer-based SED model.\nIn MAT-SED, the Transformer with relative positional encoding is employed as the context network, which enables the model to capture long-range context dependencies.\nThe masked-reconstruction task is used to pre-train the Transformer-based context network before semi-supervised based fine-tuning. The global-local feature fusion strategy is employed to further enhance the model\u2019s localization accuracy.\nMAT-SED achieves advanced performance on DCASE2023 dataset, outperforming other state-of-the-art SED models. Ablation experiments show that the self-supervised pre-training is crucial for Transformer-based structures.\nIn the future, we aim to further explore self-supervised learning methods for audio Transformer structures."
|
| 112 |
+
}
|
| 113 |
+
],
|
| 114 |
+
"appendix": [],
|
| 115 |
+
"tables": {
|
| 116 |
+
"1": {
|
| 117 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S2.T1\">\n<figcaption class=\"ltx_caption\" style=\"font-size:80%;\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Comparison with state-of-the-art SED systems (\u00a0<sup class=\"ltx_sup\" id=\"S2.T1.8.1\"><span class=\"ltx_text ltx_font_italic\" id=\"S2.T1.8.1.1\">\u2020</span></sup> denotes external data is used besides the DCASE2023 dataset).</figcaption>\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S2.T1.3\">\n<tr class=\"ltx_tr\" id=\"S2.T1.3.2\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.2.1\" style=\"width:99.6pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.2.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.2.1.1.1\"><span class=\"ltx_rule\" style=\"width:100%;height:0.8pt;background:black;display:inline-block;\">\u00a0</span></span>\n</span><span class=\"ltx_text\" id=\"S2.T1.3.2.1.2\" style=\"font-size:80%;\">\n</span><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.3.2.1.3\" style=\"font-size:80%;\">Model</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.2.2\" style=\"width:85.4pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.2.2.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.2.2.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.3.2.2.1.1.1\" style=\"font-size:80%;\">Encoder Network</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.2.3\" style=\"width:71.1pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.2.3.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.2.3.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.3.2.3.1.1.1\" style=\"font-size:80%;\">Context Network</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.2.4\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.2.4.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.2.4.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.3.2.4.1.1.1\" style=\"font-size:80%;\">PSDS1</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.2.5\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.2.5.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.2.5.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.3.2.5.1.1.1\" style=\"font-size:80%;\">PSDS2</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.3.3\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S2.T1.3.3.1\" style=\"width:99.6pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.3.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.3.1.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.3.1.1.1.1\" style=\"font-size:80%;\">CRNN-BEATs</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S2.T1.3.3.2\" style=\"width:85.4pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.3.2.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.3.2.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.3.2.1.1.1\" style=\"font-size:80%;\">Transformer + CNN</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S2.T1.3.3.3\" 
style=\"width:71.1pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.3.3.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.3.3.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.3.3.1.1.1\" style=\"font-size:80%;\">RNN</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S2.T1.3.3.4\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.3.4.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.3.4.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.3.4.1.1.1\" style=\"font-size:80%;\">0.500</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S2.T1.3.3.5\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.3.5.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.3.5.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.3.5.1.1.1\" style=\"font-size:80%;\">0.762</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.3.4\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.4.1\" style=\"width:99.6pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.4.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.4.1.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.4.1.1.1.1\" style=\"font-size:80%;\">PaSST-SED\u00a0</span><cite class=\"ltx_cite ltx_citemacro_cite\"><span class=\"ltx_text\" id=\"S2.T1.3.4.1.1.1.2.1\" style=\"font-size:80%;\">[</span><a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.08673v2#bib.bib20\" title=\"\">20</a><span class=\"ltx_text\" id=\"S2.T1.3.4.1.1.1.3.2\" style=\"font-size:80%;\">]</span></cite></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.4.2\" style=\"width:85.4pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.4.2.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.4.2.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.4.2.1.1.1\" style=\"font-size:80%;\">Transformer</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.4.3\" style=\"width:71.1pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.4.3.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.4.3.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.4.3.1.1.1\" style=\"font-size:80%;\">RNN</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.4.4\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.4.4.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.4.4.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.4.4.1.1.1\" style=\"font-size:80%;\">0.555</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.4.5\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.4.5.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.4.5.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.4.5.1.1.1\" style=\"font-size:80%;\">0.791</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.3.1\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.1.1\" style=\"width:99.6pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.1.1.1\">\n<span class=\"ltx_p\" 
id=\"S2.T1.3.1.1.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.1.1.1.1.1\" style=\"font-size:80%;\">MFDConv-BEATs</span><sup class=\"ltx_sup\" id=\"S2.T1.3.1.1.1.1.2\"><span class=\"ltx_text ltx_font_italic\" id=\"S2.T1.3.1.1.1.1.2.1\" style=\"font-size:80%;\">\u2020</span></sup><span class=\"ltx_text\" id=\"S2.T1.3.1.1.1.1.3\" style=\"font-size:80%;\">\u00a0</span><cite class=\"ltx_cite ltx_citemacro_cite\"><span class=\"ltx_text\" id=\"S2.T1.3.1.1.1.1.4.1\" style=\"font-size:80%;\">[</span><a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.08673v2#bib.bib13\" title=\"\">13</a><span class=\"ltx_text\" id=\"S2.T1.3.1.1.1.1.5.2\" style=\"font-size:80%;\">]</span></cite></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.1.2\" style=\"width:85.4pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.1.2.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.1.2.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.1.2.1.1.1\" style=\"font-size:80%;\">Transformer + CNN</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.1.3\" style=\"width:71.1pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.1.3.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.1.3.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.1.3.1.1.1\" style=\"font-size:80%;\">RNN</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.1.4\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.1.4.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.1.4.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.1.4.1.1.1\" style=\"font-size:80%;\">0.552</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.1.5\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.1.5.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.1.5.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.1.5.1.1.1\" style=\"font-size:80%;\">0.794</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.3.5\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.5.1\" style=\"width:99.6pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.5.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.5.1.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.5.1.1.1.1\" style=\"font-size:80%;\">ATST-SED\u00a0</span><cite class=\"ltx_cite ltx_citemacro_cite\"><span class=\"ltx_text\" id=\"S2.T1.3.5.1.1.1.2.1\" style=\"font-size:80%;\">[</span><a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.08673v2#bib.bib21\" title=\"\">21</a><span class=\"ltx_text\" id=\"S2.T1.3.5.1.1.1.3.2\" style=\"font-size:80%;\">]</span></cite></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.5.2\" style=\"width:85.4pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.5.2.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.5.2.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.5.2.1.1.1\" style=\"font-size:80%;\">Transformer + CNN</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.5.3\" style=\"width:71.1pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.5.3.1\">\n<span class=\"ltx_p\" 
id=\"S2.T1.3.5.3.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.5.3.1.1.1\" style=\"font-size:80%;\">RNN</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.5.4\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.5.4.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.5.4.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.5.4.1.1.1\" style=\"font-size:80%;\">0.583</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.5.5\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.5.5.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.5.5.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.5.5.1.1.1\" style=\"font-size:80%;\">0.810</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.3.6\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S2.T1.3.6.1\" style=\"width:99.6pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.6.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.6.1.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.6.1.1.1.1\" style=\"font-size:80%;\">MAT-SED\u00a0(median filter)</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S2.T1.3.6.2\" style=\"width:85.4pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.6.2.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.6.2.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.6.2.1.1.1\" style=\"font-size:80%;\">Transformer</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S2.T1.3.6.3\" style=\"width:71.1pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.6.3.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.6.3.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.6.3.1.1.1\" style=\"font-size:80%;\">Transformer</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S2.T1.3.6.4\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.6.4.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.6.4.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.3.6.4.1.1.1\" style=\"font-size:80%;\">0.587</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S2.T1.3.6.5\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.6.5.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.6.5.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.6.5.1.1.1\" style=\"font-size:80%;\">0.792</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.3.7\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.7.1\" style=\"width:99.6pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.7.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.7.1.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.7.1.1.1.1\" style=\"font-size:80%;\">MAT-SED\u00a0(maximum filter)</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.7.2\" style=\"width:85.4pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.7.2.1\">\n<span class=\"ltx_p\" 
id=\"S2.T1.3.7.2.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.7.2.1.1.1\" style=\"font-size:80%;\">Transformer</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.7.3\" style=\"width:71.1pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.7.3.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.7.3.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.7.3.1.1.1\" style=\"font-size:80%;\">Transformer</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.7.4\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.7.4.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.7.4.1.1\"><span class=\"ltx_text\" id=\"S2.T1.3.7.4.1.1.1\" style=\"font-size:80%;\">0.090</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.7.5\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.7.5.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.7.5.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.3.7.5.1.1.1\" style=\"font-size:80%;\">0.896</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.3.8\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.8.1\" style=\"width:99.6pt;padding-top:1.6pt;padding-bottom:1.6pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S2.T1.3.8.1.1\">\n<span class=\"ltx_p\" id=\"S2.T1.3.8.1.1.1\"><span class=\"ltx_rule\" style=\"width:100%;height:0.8pt;background:black;display:inline-block;\">\u00a0</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.8.2\" style=\"width:85.4pt;padding-top:1.6pt;padding-bottom:1.6pt;\"></td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.8.3\" style=\"width:71.1pt;padding-top:1.6pt;padding-bottom:1.6pt;\"></td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.8.4\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\"></td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S2.T1.3.8.5\" style=\"width:42.7pt;padding-top:1.6pt;padding-bottom:1.6pt;\"></td>\n</tr>\n</table>\n</figure>",
|
| 118 |
+
"capture": "Table 1: Comparison with state-of-the-art SED systems (\u00a0\u2020 denotes external data is used besides the DCASE2023 dataset)."
|
| 119 |
+
},
|
| 120 |
+
"2": {
|
| 121 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<figcaption class=\"ltx_caption\" style=\"font-size:80%;\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Ablation study on the context network. \"RPE\" and \"APE\" denote relative positional encoding and absolute positional encoding respectively.</figcaption>\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T2.3\">\n<tr class=\"ltx_tr\" id=\"S4.T2.3.1\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.1.1\" style=\"width:85.4pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.1.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.1.1.1.1\"><span class=\"ltx_rule\" style=\"width:100%;height:0.8pt;background:black;display:inline-block;\">\u00a0</span></span>\n</span><span class=\"ltx_text\" id=\"S4.T2.3.1.1.2\" style=\"font-size:80%;\">\n</span><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.3.1.1.3\" style=\"font-size:80%;\">Context Network</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.1.2\" style=\"width:22.8pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.1.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.1.2.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.3.1.2.1.1.1\" style=\"font-size:80%;\">RPE</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.1.3\" style=\"width:22.8pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.1.3.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.1.3.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.3.1.3.1.1.1\" style=\"font-size:80%;\">PSDS1</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.2\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S4.T2.3.2.1\" style=\"width:85.4pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.2.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.2.1.1.1\"><span class=\"ltx_text\" id=\"S4.T2.3.2.1.1.1.1\" style=\"font-size:80%;\">Transformer with RPE</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S4.T2.3.2.2\" style=\"width:22.8pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.2.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.2.2.1.1\"><span class=\"ltx_text\" id=\"S4.T2.3.2.2.1.1.1\" style=\"font-size:80%;\">\u2713</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S4.T2.3.2.3\" style=\"width:22.8pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.2.3.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.2.3.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.3.2.3.1.1.1\" style=\"font-size:80%;\">0.587</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.3\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.3.1\" style=\"width:85.4pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.3.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.3.1.1.1\"><span class=\"ltx_text\" id=\"S4.T2.3.3.1.1.1.1\" style=\"font-size:80%;\">Transformer with APE</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.3.2\" 
style=\"width:22.8pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.3.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.3.2.1.1\"><span class=\"ltx_text\" id=\"S4.T2.3.3.2.1.1.1\" style=\"font-size:80%;\">\u2717</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.3.3\" style=\"width:22.8pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.3.3.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.3.3.1.1\"><span class=\"ltx_text\" id=\"S4.T2.3.3.3.1.1.1\" style=\"font-size:80%;\">0.540</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.4\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.4.1\" style=\"width:85.4pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.4.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.4.1.1.1\"><span class=\"ltx_text\" id=\"S4.T2.3.4.1.1.1.1\" style=\"font-size:80%;\">Conformer</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.4.2\" style=\"width:22.8pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.4.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.4.2.1.1\"><span class=\"ltx_text\" id=\"S4.T2.3.4.2.1.1.1\" style=\"font-size:80%;\">\u2713</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.4.3\" style=\"width:22.8pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.4.3.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.4.3.1.1\"><span class=\"ltx_text\" id=\"S4.T2.3.4.3.1.1.1\" style=\"font-size:80%;\">0.544</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.5\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.5.1\" style=\"width:85.4pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.5.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.5.1.1.1\"><span class=\"ltx_text\" id=\"S4.T2.3.5.1.1.1.1\" style=\"font-size:80%;\">GRU</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.5.2\" style=\"width:22.8pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.5.2.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.5.2.1.1\"><span class=\"ltx_text\" id=\"S4.T2.3.5.2.1.1.1\" style=\"font-size:80%;\">\u2717</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.5.3\" style=\"width:22.8pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.5.3.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.5.3.1.1\"><span class=\"ltx_text\" id=\"S4.T2.3.5.3.1.1.1\" style=\"font-size:80%;\">0.557</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.6\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.6.1\" style=\"width:85.4pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T2.3.6.1.1\">\n<span class=\"ltx_p\" id=\"S4.T2.3.6.1.1.1\"><span class=\"ltx_rule\" style=\"width:100%;height:0.8pt;background:black;display:inline-block;\">\u00a0</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.6.2\" style=\"width:22.8pt;padding-top:0.8pt;padding-bottom:0.8pt;\"></td>\n<td 
class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T2.3.6.3\" style=\"width:22.8pt;padding-top:0.8pt;padding-bottom:0.8pt;\"></td>\n</tr>\n</table>\n</figure>",
|
| 122 |
+
"capture": "Table 2: Ablation study on the context network. \"RPE\" and \"APE\" denote relative positional encoding and absolute positional encoding respectively."
|
| 123 |
+
},
|
| 124 |
+
"3": {
|
| 125 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T3\">\n<figcaption class=\"ltx_caption\" style=\"font-size:80%;\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Ablation study on the hyperparameters in \n<br class=\"ltx_break\"/>the global-local feature fusion strategy.</figcaption>\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T3.3\">\n<tr class=\"ltx_tr\" id=\"S4.T3.3.1\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_r\" id=\"S4.T3.3.1.1\" style=\"width:28.5pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T3.3.1.1.1\">\n<span class=\"ltx_p\" id=\"S4.T3.3.1.1.1.1\"><span class=\"ltx_rule\" style=\"width:100%;height:1.0pt;background:black;display:inline-block;\">\u00a0</span></span>\n</span><span class=\"ltx_text\" id=\"S4.T3.3.1.1.2\" style=\"font-size:80%;\">\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T3.3.1.2\" style=\"width:28.5pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T3.3.1.2.1\">\n<span class=\"ltx_p\" id=\"S4.T3.3.1.2.1.1\"><span class=\"ltx_text\" id=\"S4.T3.3.1.2.1.1.1\" style=\"font-size:80%;\">0</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T3.3.1.3\" style=\"width:28.5pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T3.3.1.3.1\">\n<span class=\"ltx_p\" id=\"S4.T3.3.1.3.1.1\"><span class=\"ltx_text\" id=\"S4.T3.3.1.3.1.1.1\" style=\"font-size:80%;\">0.5</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T3.3.1.4\" style=\"width:28.5pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T3.3.1.4.1\">\n<span class=\"ltx_p\" id=\"S4.T3.3.1.4.1.1\"><span class=\"ltx_text\" id=\"S4.T3.3.1.4.1.1.1\" style=\"font-size:80%;\">1</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.3.2\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_r ltx_border_t\" id=\"S4.T3.3.2.1\" style=\"width:28.5pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T3.3.2.1.1\">\n<span class=\"ltx_p\" id=\"S4.T3.3.2.1.1.1\"><span class=\"ltx_text\" id=\"S4.T3.3.2.1.1.1.1\" style=\"font-size:80%;\">PSDS1</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S4.T3.3.2.2\" style=\"width:28.5pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T3.3.2.2.1\">\n<span class=\"ltx_p\" id=\"S4.T3.3.2.2.1.1\"><span class=\"ltx_text\" id=\"S4.T3.3.2.2.1.1.1\" style=\"font-size:80%;\">0.565</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S4.T3.3.2.3\" style=\"width:28.5pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T3.3.2.3.1\">\n<span class=\"ltx_p\" id=\"S4.T3.3.2.3.1.1\"><span class=\"ltx_text\" id=\"S4.T3.3.2.3.1.1.1\" style=\"font-size:80%;\">0.587</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_t\" id=\"S4.T3.3.2.4\" style=\"width:28.5pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T3.3.2.4.1\">\n<span class=\"ltx_p\" id=\"S4.T3.3.2.4.1.1\"><span class=\"ltx_text\" id=\"S4.T3.3.2.4.1.1.1\" 
style=\"font-size:80%;\">0.579</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.3.3\">\n<td class=\"ltx_td ltx_align_justify ltx_align_middle ltx_border_r\" id=\"S4.T3.3.3.1\" style=\"width:28.5pt;padding-top:0.8pt;padding-bottom:0.8pt;\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T3.3.3.1.1\">\n<span class=\"ltx_p\" id=\"S4.T3.3.3.1.1.1\"><span class=\"ltx_rule\" style=\"width:100%;height:0.8pt;background:black;display:inline-block;\">\u00a0</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T3.3.3.2\" style=\"width:28.5pt;padding-top:0.8pt;padding-bottom:0.8pt;\"></td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T3.3.3.3\" style=\"width:28.5pt;padding-top:0.8pt;padding-bottom:0.8pt;\"></td>\n<td class=\"ltx_td ltx_align_justify ltx_align_middle\" id=\"S4.T3.3.3.4\" style=\"width:28.5pt;padding-top:0.8pt;padding-bottom:0.8pt;\"></td>\n</tr>\n</table>\n</figure>",
|
| 126 |
+
"capture": "Table 3: Ablation study on the hyperparameters in \nthe global-local feature fusion strategy."
|
| 127 |
+
}
|
| 128 |
+
},
|
| 129 |
+
"image_paths": {
|
| 130 |
+
"1": {
|
| 131 |
+
"figure_path": "2408.08673v2_figure_1.png",
|
| 132 |
+
"caption": "Figure 1: The architecture of MAT-SED, comprising two main components: the encoder network (green) and the context network (yellow), both of which are based on Transformer structures. \"RPE\" in the context network indicates the relative positional encoding.",
|
| 133 |
+
"url": "http://arxiv.org/html/2408.08673v2/x1.png"
|
| 134 |
+
},
|
| 135 |
+
"2": {
|
| 136 |
+
"figure_path": "2408.08673v2_figure_2.png",
|
| 137 |
+
"caption": "Figure 2: The global-local feature fusion strategy in the fine-tuning stage.",
|
| 138 |
+
"url": "http://arxiv.org/html/2408.08673v2/x2.png"
|
| 139 |
+
},
|
| 140 |
+
"3": {
|
| 141 |
+
"figure_path": "2408.08673v2_figure_3.png",
|
| 142 |
+
"caption": "Figure 3: Convergence curves of training MAT-SED from scratch and end-to-end fine-tuning after masked-reconstruction pre-training.",
|
| 143 |
+
"url": "http://arxiv.org/html/2408.08673v2/x3.png"
|
| 144 |
+
},
|
| 145 |
+
"4": {
|
| 146 |
+
"figure_path": "2408.08673v2_figure_4.png",
|
| 147 |
+
"caption": "Figure 4: Impact of different masking ratio inimage the masked-reconstruction pre-training stage.",
|
| 148 |
+
"url": "http://arxiv.org/html/2408.08673v2/x4.png"
|
| 149 |
+
}
|
| 150 |
+
},
|
| 151 |
+
"validation": true,
|
| 152 |
+
"references": [],
|
| 153 |
+
"url": "http://arxiv.org/html/2408.08673v2"
|
| 154 |
+
}
|
20240819/2408.09624v1.json
ADDED
|
@@ -0,0 +1,557 @@
| 1 |
+
{
|
| 2 |
+
"title": "Attention is a smoothed cubic spline",
|
| 3 |
+
"abstract": "We highlight a perhaps important but hitherto unobserved insight: The attention module in a transformer is a smoothed cubic spline. Viewed in this manner, this mysterious but critical component of a transformer becomes a natural development of an old notion deeply entrenched in classical approximation theory. More precisely, we show that with ReLU-activation, attention, masked attention, encoder\u2013decoder attention are all cubic splines. As every component in a transformer is constructed out of compositions of various attention modules (= cubic splines) and feed forward neural networks (= linear splines), all its components \u2014 encoder, decoder, and encoder\u2013decoder blocks; multilayered encoders and decoders; the transformer itself \u2014 are cubic or higher-order splines. If we assume the Pierce\u2013Birkhoff conjecture, then the converse also holds, i.e., every spline is a ReLU-activated encoder. Since a spline is generally just , one way to obtain a smoothed -version is by replacing ReLU with a smooth activation; and if this activation is chosen to be SoftMax, we recover the original transformer as proposed by Vaswani et al. This insight sheds light on the nature of the transformer by casting it entirely in terms of splines, one of the best known and thoroughly understood objects in applied mathematics.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "1. Mathematical description of the transformer",
|
| 9 |
+
"text": "A transformer is typically presented in the literature as a flow chart [45 ###reference_b45###, Figure 1]. We show a version in Figure 1 ###reference_###.\nWithout a rigorous definition of the transformer, it will be difficult if not impossible to prove mathematical claims about it. We will nail down in mathematically precise terms the full inner workings of a transformer. While it is common to find descriptions that selectively present parts as well-defined maps and revert to words and pictures when it becomes less convenient, what sets us apart below is thoroughness \u2014 nothing will be swept under the rug. On occasions we had to look into the source codes of common implementations to unravel inconvenient details left ambiguous in the literature. This section is our small side contribution and a public service.\nThe heart of Figure 1 ###reference_### are the two parts enclosed in red dash lines, called encoder and decoder respectively. They are constructed out of feed forward neural networks, defined in Section 1.2 ###reference_###, and attention modules, defined in Section 1.3 ###reference_###, chained together via function compositions. The simplest version is the encoder in Section 1.4 ###reference_### and is what the uninitiated reader should keep in mind. We add the bells and whistles later: Section 1.5 ###reference_### defines the masked attention in the right-half of Figure 1 ###reference_###, from which we obtain the decoder in Section 1.6 ###reference_###. Section 1.7 ###reference_### explains the encoder\u2013decoder structure \u2014 the left- and right-halves in Figure 1 ###reference_###. Section 1.8 ###reference_### puts everything together to define the transformer. Section 1.10 ###reference_0### discusses the one omission in Figure 1 ###reference_###, the \u201cadd & norm\u201d layers found in [45 ###reference_b45###, Figure 1]."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "1.1",
|
| 13 |
+
"parent_section_id": "1",
|
| 14 |
+
"section_name": "1.1. Notations",
|
| 15 |
+
"text": "We write all vectors in as column vectors, i.e., . Let . When enclosed in parentheses denotes a column vector, i.e.,\nWhen enclosed in brackets is a row vector.\nWe will apply this convention more generally: For matrices , we write\nand .\nWhen we write for functions , , it denotes the function\nThe function takes a vector and outputs a probability vector of the same dimension,\nWhen is applied to a matrix , it is applied columnwise to each of the columns of . So .\nAlthough we will write throughout to avoid clutter, we will allow for in the argument of our functions on occasion, which will be clearly indicated. Note that if ."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "1.2",
|
| 19 |
+
"parent_section_id": "1",
|
| 20 |
+
"section_name": "1.2. Feed forward neural network",
|
| 21 |
+
"text": "The rectified linear unit is defined by and extended coordinatewise to vectors in or matrices in . We also introduce the shorthand . Clearly, for any .\nAn -layer feed forward neural network is a map defined by a composition:\nfor any input , weight matrix , with , ,\nwith the bias vector, and the activation function, applied coordinatewise. In this article, we set throughout. To avoid clutter we omit the for function composition within a feed forward neural network unless necessary for emphasis, i.e., we will usually write instead of . When is applied to a matrix , it is always applied columnwise to each of the columns of . So . We will also drop the \u201cfeed forward\u201d henceforth since all neural networks that appear in our article are feed forward ones."
|
| 22 |
+
},
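To make the definition above concrete, here is a minimal numpy sketch (our own illustration, not code from the paper) of an L-layer ReLU feed forward network applied columnwise to a matrix input; the layer sizes and random weights are arbitrary assumptions.

import numpy as np

def relu(x):
    # ReLU applied coordinatewise to a vector or matrix
    return np.maximum(x, 0.0)

def ffn(X, weights, biases):
    # L-layer feed forward network: affine layer, ReLU, ..., final affine layer,
    # applied columnwise to each column of the matrix input X
    for l, (A, b) in enumerate(zip(weights, biases)):
        X = A @ X + b[:, None]
        if l < len(weights) - 1:        # no activation after the last layer
            X = relu(X)
    return X

rng = np.random.default_rng(0)
d, n, h = 4, 5, 8                       # illustrative sizes
Ws = [rng.standard_normal((h, d)), rng.standard_normal((d, h))]
bs = [rng.standard_normal(h), rng.standard_normal(d)]
X = rng.standard_normal((d, n))
print(ffn(X, Ws, bs).shape)             # (4, 5)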
|
| 23 |
+
{
|
| 24 |
+
"section_id": "1.3",
|
| 25 |
+
"parent_section_id": "1",
|
| 26 |
+
"section_name": "1.3. Attention",
|
| 27 |
+
"text": "The attention module is known by a variety of other names, usually a combination of attention/self-attention module/mechanism, and usually represented as flow charts as in Figure 2 ###reference_###.\nMathematically, it is a map ,\nwhere , , are linear layers, i.e., given by affine maps\nwith weight matrices , , and\nbias matrices , . Here we have used the more general affine form of these linear layers as attention modules are implemented in practice,111https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html#torch.nn.MultiheadAttention ###reference_torch.nn.MultiheadAttention.html#torch.nn.MultiheadAttention###; the affine form is obtained by setting add_bias_kv = True. as opposed to the linear form in [45 ###reference_b45###] where the biases are set to zero. The in (3 ###reference_###) is applied columnwise and outputs a matrix.\nThe map implements the mechanism of taking a query and a set of key\u2013value pairs to an output. Interpreted in this way, the input is a data sequence of length , with each data point , . The columns of and represent queries and keys respectively \u2014 note that these are vectors in and is generally much smaller than or . The columns of represent values.\nMore generally, a multihead or -headed attention module is a map given by\nwhere are attention modules as in (3 ###reference_###), . The reader is reminded of our convention in Section 1.1 ###reference_###: parentheses denote column, which is why in our constructions we will often the phrase \u201cstacking to obtain \u201d to mean (5 ###reference_###)."
|
| 28 |
+
},
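The formula display for (3) did not survive extraction, so as a hedge the sketch below uses the standard form Att(X) = V(X) SoftMax(K(X)^T Q(X)) with affine (biased) Q, K, V, which matches the surrounding description; all sizes and weights are illustrative assumptions.

import numpy as np

def softmax(Z):
    # columnwise SoftMax: each column is mapped to a probability vector
    E = np.exp(Z - Z.max(axis=0, keepdims=True))
    return E / E.sum(axis=0, keepdims=True)

def attention(X, Wq, bq, Wk, bk, Wv, bv):
    # Att(X) = V(X) @ SoftMax(K(X)^T @ Q(X)) with affine (biased) Q, K, V
    Q = Wq @ X + bq[:, None]   # queries,  k x n
    K = Wk @ X + bk[:, None]   # keys,     k x n
    V = Wv @ X + bv[:, None]   # values,   d x n
    return V @ softmax(K.T @ Q)

rng = np.random.default_rng(1)
d, n, k = 6, 4, 3
X = rng.standard_normal((d, n))
params = [rng.standard_normal(s) for s in
          [(k, d), (k,), (k, d), (k,), (d, d), (d,)]]
print(attention(X, *params).shape)   # (6, 4)

Column j of the output is a convex combination of the value columns, weighted by the SoftMax of the key scores against query j; a multihead module simply stacks several such outputs.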
|
| 29 |
+
{
|
| 30 |
+
"section_id": "1.4",
|
| 31 |
+
"parent_section_id": "1",
|
| 32 |
+
"section_name": "1.4. Encoder",
|
| 33 |
+
"text": "An encoder block, or more precisely a -head encoder block, is a map obtained by composing the output of a -head attention module , with an -layer -neural network ,\nMore generally, an encoder or -layer encoder, is obtained by composing encoder blocks, i.e.,\nwhere are neural networks and are attention modules, , . In Figure 1 ###reference_###, the encoder is the part enclosed within the red dash lines on the left. The structure in (7 ###reference_###) appears to require alternate compositions of attention modules and neural networks but one may skip some or all of the \u2019s. The reason is that we may choose these \u2019s to be an identity map, which can be represented as a one-layer neural network as .\nWhile we allow the neural networks appearing in (7 ###reference_###) to have multiple hidden layers, the original proposed model in [45 ###reference_b45###] requires that they be single-layer. We will show in Lemma 3.7 ###reference_theorem7### that these are in fact equivalent: Any encoder of the form (7 ###reference_###) may be written as one where all \u2019s have only one hidden layer, but at the expense of a larger ."
|
| 34 |
+
},
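A minimal sketch of an encoder block and an m-layer encoder as plain compositions, reusing the ffn() and attention() helpers from the sketches above (single-head for brevity; the paper's version stacks several heads).

def encoder_block(X, att_params, Ws, bs):
    # Enc(X) = phi(Att(X)): attention followed by a feed forward network
    return ffn(attention(X, *att_params), Ws, bs)

def encoder(X, blocks):
    # an m-layer encoder is the composition of m encoder blocks; a block whose
    # ffn is the identity realizes the "skipped phi" case mentioned above
    for att_params, Ws, bs in blocks:
        X = encoder_block(X, att_params, Ws, bs)
    return X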
|
| 35 |
+
{
|
| 36 |
+
"section_id": "1.5",
|
| 37 |
+
"parent_section_id": "1",
|
| 38 |
+
"section_name": "1.5. Masked attention",
|
| 39 |
+
"text": "In many applications of transformers, particularly large language models, the data is of a sequential nature. So the function we want to learn or approximate is expected to be autoregressive [45 ###reference_b45###], i.e., takes the form\nIn other words depends only on the first columns , . In general will be nonlinear, but when is linear, then this simply means it is given by an upper triangular matrix. So an autoregressive function may be viewed as a nonlinear generalization of an upper triangular matrix.\nTo achieve this property in attention module, we define the function by\nA masked attention module is then given by\nIt is easy to check that a masked attention module is always autoregressive."
|
| 40 |
+
},
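A sketch of the masking function and masked attention, assuming the standard convention that M(Z) keeps Z_ij for i <= j and sets the remaining entries to minus infinity, which the columnwise SoftMax then maps to zero weight; softmax() is from the attention sketch above. The final check illustrates the autoregressive property.

import numpy as np

def mask(Z):
    # M(Z): keep Z_ij for i <= j (key position <= query position), set the
    # entries below the diagonal to -inf so SoftMax assigns them zero weight
    n = Z.shape[0]
    return np.where(np.arange(n)[:, None] <= np.arange(n)[None, :], Z, -np.inf)

def masked_attention(X, Wq, bq, Wk, bk, Wv, bv):
    Q = Wq @ X + bq[:, None]
    K = Wk @ X + bk[:, None]
    V = Wv @ X + bv[:, None]
    return V @ softmax(mask(K.T @ Q))

rng = np.random.default_rng(3)
d, n, k = 5, 6, 2
X = rng.standard_normal((d, n))
params = [rng.standard_normal(s) for s in
          [(k, d), (k,), (k, d), (k,), (d, d), (d,)]]
X2 = X.copy()
X2[:, -1] += 1.0                              # perturb only the last input column
Y1, Y2 = masked_attention(X, *params), masked_attention(X2, *params)
print(np.allclose(Y1[:, :-1], Y2[:, :-1]))    # True: earlier outputs unchanged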
|
| 41 |
+
{
|
| 42 |
+
"section_id": "1.6",
|
| 43 |
+
"parent_section_id": "1",
|
| 44 |
+
"section_name": "1.6. Decoder",
|
| 45 |
+
"text": "A decoder block is the analogue of an encoder block where we have a masked attention in (6 ###reference_###):\nWe may also replace any or all of the \u2019s in (7 ###reference_###) by masked versions \u2019s. If we replace all, then the resulting map\nis autoregressive but more generally we will just selectively replace some \u2019s with \u2019s. We call the resulting map a decoder. Note that the part enclosed within red dash lines in the right-half of Figure 1 ###reference_### is not quite a decoder as it takes a feed from the left-half; instead it is an encoder\u2013decoder, as we will discuss next."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "1.7",
|
| 49 |
+
"parent_section_id": "1",
|
| 50 |
+
"section_name": "1.7. Encoder\u2013decoder attention",
|
| 51 |
+
"text": "The multihead attention in the right-half of Figure 1 ###reference_### accepts a feed from outside the red dash box. When used in this manner, it is called an encoder\u2013decoder attention module [45 ###reference_b45###], as it permits one to use queries from the decoder, but keys and values from the encoder. Mathematically, this is a map ,\nwhere are as in (4 ###reference_###) but while are functions of , is now a function of . The independent matrix variables and take values in and respectively. As a result we have to adjust the dimensions of the weight matrices slightly: , , . The encoder\u2013decoder attention is partially autoregressive, i.e., autoregressive in but not in , taking the form"
|
| 52 |
+
},
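A sketch of encoder–decoder (cross) attention: queries are computed from the decoder input Y, keys and values from the encoder output X; softmax() is as in the attention sketch above, and all sizes are illustrative assumptions.

def encoder_decoder_attention(Y, X, Wq, bq, Wk, bk, Wv, bv):
    # queries from the decoder input Y, keys and values from the encoder output X
    Q = Wq @ Y + bq[:, None]      # k x nY
    K = Wk @ X + bk[:, None]      # k x nX
    V = Wv @ X + bv[:, None]      # d x nX
    return V @ softmax(K.T @ Q)   # d x nY: one output column per decoder query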
|
| 53 |
+
{
|
| 54 |
+
"section_id": "1.8",
|
| 55 |
+
"parent_section_id": "1",
|
| 56 |
+
"section_name": "1.8. Transformer",
|
| 57 |
+
"text": "An encoder\u2013decoder block is defined by a multihead masked attention module , a multihead encoder\u2013decoder attention module , and a neural network , via\nAn -layer encoder\u2013decoder is then constructed from an -layer encoder , and encoder\u2013decoder blocks given by . We define recursively as\nfor , . We call the encoder\u2013decoder. For all mathematical intents and purposes, is the transformer. As we will see in Sections 1.10 ###reference_0### and 1.11 ###reference_1###, the other components in Figure 1 ###reference_### or [45 ###reference_b45###, Figure 1] are extraneous to the operation of a transformer.\nWe stress that the word \u201ctransformer\u201d is sometimes used to refer to just the encoder or the decoder alone. We choose to make the distinction in our article but many do not. For example, Google\u2019s BERT [16 ###reference_b16###], for Bidirectional Encoder Representations from Transformers, is an encoder whereas OpenAI\u2019s GPT [6 ###reference_b6###], for Generative Pretrained Transformer, is a decoder."
|
| 58 |
+
},
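A sketch of the recursion just described: starting from the decoder input, each encoder–decoder block applies masked self-attention, then cross-attention against the fixed encoder output, then a feed forward network; it reuses masked_attention(), encoder_decoder_attention() and ffn() from the sketches above.

def encoder_decoder(Y, X_enc, layers):
    # the recursion T_0 = Y, T_l = block_l(T_{l-1}, X_enc); X_enc is the
    # output of the encoder, held fixed across all decoder-side layers
    T = Y
    for masked_params, cross_params, Ws, bs in layers:
        T = masked_attention(T, *masked_params)                  # masked self-attention
        T = encoder_decoder_attention(T, X_enc, *cross_params)   # cross-attention
        T = ffn(T, Ws, bs)                                       # feed forward network
    return T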
|
| 59 |
+
{
|
| 60 |
+
"section_id": "1.9",
|
| 61 |
+
"parent_section_id": "1",
|
| 62 |
+
"section_name": "1.9. ReLU-transformer",
|
| 63 |
+
"text": "The definitions in Sections 1.2 ###reference_###\u20131.8 ###reference_### are faithful mathematical transcriptions of components as described in Vaswani et al. original article [45 ###reference_b45###]. In this section we take a small departure \u2014 replacing every occurrence of with to obtain what is called a -transformer. This is not new either but proposed and studied in [3 ###reference_b3###, 48 ###reference_b48###].\nWe begin by defining -attention modules. They have the same structures as (3 ###reference_###), (9 ###reference_###), (12 ###reference_###) except that is replaced by , i.e.,\nAn encoder, decoder, or encoder\u2013decoder constructed out of such -attention modules will be called a -encoder, -decoder, or -encoder\u2013decoder respectively. In particular, a -transformer is, for all mathematical intents and purposes, a -encoder\u2013decoder.\nThese -activated variants are essentially \u201cunsmoothed\u201d versions of their smooth -activated cousins in Sections 1.2 ###reference_###\u20131.8 ###reference_###. We may easily revert to the smooth versions by a simple smoothing process \u2014 replace all -activated attentions by the original -activated ones (but the neural networks would remain -activated).\n-transformers work naturally with our claims and proofs in Section 3 ###reference_###. Nevertheless, even in practice -transformers can have desirable, possibly superior, features compared to the original -transformers: investigations in [48 ###reference_b48###] provided extensive empirical evidence that substituting with causes no noticeable loss and occasionally even affords a slight gain in performance across both language and vision tasks; it is also easier to explain the in-context-learning capability of -transformers [3 ###reference_b3###].\nMore generally, the use of alternative activations in a transformer is a common practice. There are various reasons to replace , one of which is to avoid the considerable training cost associated with the use of activation. In [30 ###reference_b30###], is replaced with a Gaussian kernel; in [27 ###reference_b27###], only the normalization part of is kept; in [22 ###reference_b22###], it is shown that an activation does not need to map into the probability simplex. Linearized attentions are used in [39 ###reference_b39###], and sparse attentions in [37 ###reference_b37###]; these are intended primarily to accelerate the operator but they have other features too."
|
| 64 |
+
},
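Obtaining the ReLU-activated variant is a one-line change to the attention sketch above: replace the columnwise SoftMax by a coordinatewise ReLU.

import numpy as np

def relu_attention(X, Wq, bq, Wk, bk, Wv, bv):
    # identical to attention() above except the columnwise SoftMax is replaced
    # by a coordinatewise ReLU -- the "unsmoothed" variant
    Q = Wq @ X + bq[:, None]
    K = Wk @ X + bk[:, None]
    V = Wv @ X + bv[:, None]
    return V @ np.maximum(K.T @ Q, 0.0)

Each output entry is then a sum, over positions, of an affine form times the ReLU of a quadratic form in the entries of X, i.e., a piecewise cubic function, which is precisely the point of Section 3.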
|
| 65 |
+
{
|
| 66 |
+
"section_id": "1.1",
|
| 67 |
+
"parent_section_id": "1",
|
| 68 |
+
"section_name": "1.10. Layer normalization and residual connection",
|
| 69 |
+
"text": "Comparing our Figure 1 ###reference_### and [45 ###reference_b45###, Figure 1], one might notice that we have omitted the \u201cadd & norm\u201d layers.\nThe \u201cadd\u201d step, also called residual connection [23 ###reference_b23###], may be easily included in our analysis \u2014 all our results and proofs in Section 3 ###reference_### hold verbatim with the inclusion of residual connection. For an encoder block , a residual connection simply means adding the identity map , , i.e.,\nand likewise for a decoder block . For an encoder\u2013decoder block , a residual connection simply means adding the projection map , , i.e.,\nAs will be clear from the proofs in Section 3 ###reference_###, all results therein hold with or without residual connection.\nThe \u201cnorm\u201d step, also called layer normalization [2 ###reference_b2###] refers to statistical standardization, i.e., mean centering and scaling by standard deviation of each column vector in . This is an ubiquitous process routinely performed in just about any procedure involving any data for practical reasons. But this innocuous process introduces additional nonlinearity that does not fit in our framework.\nWe do not consider either of these critical to the workings of a transformer. They are by no means unique and may be easily replaced with other data standardization process, as shown in [22 ###reference_b22###]."
|
| 70 |
+
},
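Minimal sketches of the two omitted layers, for completeness: a residual connection just adds the identity map to a block (encoder_block() is from the encoder sketch above), and layer normalization standardizes each column; the eps guard is a standard implementation detail and our own assumption.

import numpy as np

def encoder_block_residual(X, att_params, Ws, bs):
    # "add": residual connection, Enc(X) + X
    return encoder_block(X, att_params, Ws, bs) + X

def layer_norm(X, eps=1e-5):
    # "norm": mean-center each column of X and scale by its standard deviation
    mu = X.mean(axis=0, keepdims=True)
    sd = X.std(axis=0, keepdims=True)
    return (X - mu) / (sd + eps)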
|
| 71 |
+
{
|
| 72 |
+
"section_id": "1.1",
|
| 73 |
+
"parent_section_id": "1",
|
| 74 |
+
"section_name": "1.11. Miscellany",
|
| 75 |
+
"text": "The \u201cinput/output embedding\u201d and \u201cposition embedding\u201d in Figure 1 ###reference_### convert sentences or images (or whatever real-world entity the transformer is used for) to an input in ; the \u201clinear layer\u201d and \u201cSoftMax\u201d in the right half assign probability values to the output. These are just auxiliary components necessary in any situation involving human-generated input or requiring human-interpretable output. They are common to all practical AI models and we do not regard them as part of the transformer architecture."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "2",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "2. Splines",
|
| 81 |
+
"text": "This section covers the salient aspects of splines relevant for us. We write for the ring of polynomials with real coefficients in variables and for that in .\nSplines have a rich history and a vast literature in applied and computational mathematics, this being precisely the reason we chose them as our platform to understand a new technology like the transformer. Mathematical splines, as opposed to the mechanical ones used by draftsmen and shipbuilders, were first named in [42 ###reference_b42###]. A one-line summary of its early history, with many regretful omissions, is that univariate splines were first proposed in [41 ###reference_b41###], multivariate splines in [4 ###reference_b4###], B-Splines in [10 ###reference_b10###], and box splines in [15 ###reference_b15###].\nAn important departure of our discussion of splines in this article is that we will not concern ourselves with differentiability, avoiding the usual efforts to ensure that a piecewise-defined function is at points where the different pieces meet. The reason is simple: our results in the next section will show that every continuous spline is a -transformer (and vice versa) and when presented as such, there is a straightforward and natural way to smooth a spline to any desired degree-of-smoothness , namely, by replacing with a -activation. So there is no need for us to even introduce the notions of knots, tangential continuity, curvature continuity, etc. Indeed, viewed in this manner, the transformer with its activation is the first example of a \u201c-spline\u201d \u2014 an impossible object in classical constructions of splines as the degree-of-smoothness of a spline can never exceed the degree of its polynomial pieces."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "2.1",
|
| 85 |
+
"parent_section_id": "2",
|
| 86 |
+
"section_name": "2.1. Scalar-valued splines",
|
| 87 |
+
"text": "In its simplest form a spline is a piecewise-polynomial real-valued function defined over a partition of its domain . The classical and most basic partition is a triangulation, i.e., a subdivision into -dimensional simplices whose union is and intersecting only along faces; more generally one may also use convex polytopes in place of simplices [13 ###reference_b13###, 8 ###reference_b8###, 35 ###reference_b35###]. We will need a slightly more sophisticated partition called a semialgebraic partition [18 ###reference_b18###, 17 ###reference_b17###, 43 ###reference_b43###]. For any , let\na finite set of size . Note that this is really just the set of ternary numerals with (ternary) bits.\nAny induces a sign partition of via\nThen is a partition of , the semialgebraic partition induced by .\nNote that the domain of in (14 ###reference_###) merely serves as a placeholder for any -element set and does not need to be . Indeed we will usually write to emphasize that it is an index for the partition induced by . Any triangulation or partition into polytopes can be obtained by choosing appropriate linear polynomials so Definition 2.1 ###reference_theorem1### generalizes the basic one that requires partition to be piecewise linear.\nLet be the semialgebraic partition induced by . A continuous function is a polynomial spline of degree if for each ,\nhas degree not more than ;\nif , then restricts to a polynomial of degree not more than on , i.e.,\n for all , for some of degree not more than .\nHenceforth, \u201cspline\u201d will mean \u201cpolynomial spline,\u201d \u201cdegree-\u201d will mean \u201cdegree not more than ,\u201d and \u201cpartition\u201d will mean \u201csemialgebraic partition.\u201d The small cases are customarily called linear, quadratic, cubic, and quintic splines respectively. The standard notation for the set of all -times differentiable degree- splines with partition induced by is but since we will only need the case and splines as defined in Definition 2.2 ###reference_theorem2### are always continuous, we may drop the superscript .\nObserve that is a finite-dimensional real vector space. So it is straightforward to extend Definition 2.2 ###reference_theorem2### to -valued splines for any finite-dimensional real vector space using tensor product, namely, they are simply elements of [29 ###reference_b29###, Example 4.30]. For the benefit of readers unfamiliar with tensor product constructions, we go over this below in a concrete manner for and ."
|
| 88 |
+
},
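A small sketch of the sign partition: the piece of the semialgebraic partition containing a point x is indexed by the sign pattern of the defining polynomials at x; the two polynomials below are our own illustrative choice, not the paper's.

import numpy as np

def sign_vector(polys, x):
    # index of the semialgebraic piece containing x: the sign pattern
    # (sgn p_1(x), ..., sgn p_m(x)) in {-1, 0, 1}^m of the m polynomials
    return tuple(int(np.sign(p(x))) for p in polys)

# illustrative partition of R^2 by two polynomials
polys = [lambda x: x[0] ** 2 + x[1] ** 2 - 1.0,   # unit circle
         lambda x: x[0] - x[1]]                    # a line through the origin
print(sign_vector(polys, np.array([0.2, 0.1])))    # (-1, 1): inside the circle, below the line x1 = x2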
|
| 89 |
+
{
|
| 90 |
+
"section_id": "2.2",
|
| 91 |
+
"parent_section_id": "2",
|
| 92 |
+
"section_name": "2.2. Vector-valued splines",
|
| 93 |
+
"text": "A vector-valued degree- spline is given by\nwhere and are the standard basis vectors. This is equivalent to requiring be a degree- spline coordinatewise, i.e., where .\nTraditionally, vector-valued splines are the most important class of splines for practical applications. Special cases include spline curves (, or ) and spline surfaces (, or ), used to parameterize curves and surfaces that pass near a collection of given data points. These are of fundamental importance in computer graphics and computer-aided design [20 ###reference_b20###, 44 ###reference_b44###]."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "2.3",
|
| 97 |
+
"parent_section_id": "2",
|
| 98 |
+
"section_name": "2.3. Matrix-valued splines",
|
| 99 |
+
"text": "In this case we are interested in splines that are not just matrix-valued but also matrix-variate. One nice feature with our treatment of splines in Section 2.1 ###reference_### is that we can define matrix-variate splines over by simply replacing all occurrences of with . A matrix-valued degree- spline is then given by\nwhere and , , . Here is the standard basis matrix with one in th entry and zeros everywhere else. Again, an alternative but equivalent way to define them would be in a coordinatewise fashion, i.e., where , , . Note that reduces to the case in Section 2.2 ###reference_###."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "2.4",
|
| 103 |
+
"parent_section_id": "2",
|
| 104 |
+
"section_name": "2.4. Pierce\u2013Birkhoff conjecture",
|
| 105 |
+
"text": "Garrett Birkhoff, likely the person first to realize the importance of splines in applications though his consulting work [49 ###reference_b49###], also posed one of the last remaining open problems about splines [5 ###reference_b5###].\nFor every spline , there exists a finite set of polynomials , , such that\nThis conjecture is known to be true for and but is open for all higher dimensions [33 ###reference_b33###]. Our results in Section 3.3 ###reference_### will be established on the assumption that the Pierce\u2013Birkhoff conjecture holds true for all , given that there is significant evidence [31 ###reference_b31###, 46 ###reference_b46###, 34 ###reference_b34###] for its validity.\nThe kind of functions on the right of (16 ###reference_###) we will call max-definable functions in the variables . These are functions generated by under three binary operations: addition , multiplication , maximization ; and scalar multiplication by . Note that minimization comes for free as . Using the identity\n,\nany max-definable functions can be reduced to the form with [24 ###reference_b24###]. The notion may be easily extended to matrix-variate, matrix-valued functions coordinatewise, i.e., by requiring that each be a max-definable function in the variables .\nClearly, the set of max-definable functions is contained within the set of splines. Pierce\u2013Birkhoff conjecture states that the two sets are equal. Both are examples of an \u201c-ring\u201d as defined in [5 ###reference_b5###], now christened \u201cPierce\u2013Birkhoff ring\u201d after the two authors. If we drop multiplication from the list of binary operations generating the max-definable functions, the resulting algebraic object is the renown max-plus algebra or tropical semiring [32 ###reference_b32###]."
|
| 106 |
+
},
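A tiny evaluator for max-definable functions in the Pierce–Birkhoff form max_i min_j p_ij, with |x| = max(x, -x) as the simplest nontrivial example; the representation of |x| is standard, the code is our own illustration.

def max_definable(x, table):
    # table[i][j] holds the polynomial p_ij as a callable; the function value
    # is max over rows of the min over each row's polynomials
    return max(min(p(x) for p in row) for row in table)

# |x| = max(x, -x): one polynomial per row, so the inner min is trivial
table = [[lambda x: x], [lambda x: -x]]
print(max_definable(3.0, table), max_definable(-2.5, table))  # 3.0 2.5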
|
| 107 |
+
{
|
| 108 |
+
"section_id": "3",
|
| 109 |
+
"parent_section_id": null,
|
| 110 |
+
"section_name": "3. Equivalence of splines and transformers",
|
| 111 |
+
"text": "We will show that every component of the transformer defined in Section 1 ###reference_### is a spline \u2014 neural network, attention module, masked attention module, encoder block, decoder block, encoder, decoder, encoder\u2013decoder \u2014 so long as they are -activated. More importantly, if Conjecture 2.3 ###reference_theorem3### is true, then the converse also holds in the sense that every spline is an encoder. The equivalence between -activated feed-forward neural networks and linear splines is well-known [1 ###reference_b1###]. The other equivalences will be established below. Henceforth we will assume -activation throughout this section and will not specify this unless necessary for emphasis."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "3.1",
|
| 115 |
+
"parent_section_id": "3",
|
| 116 |
+
"section_name": "3.1. Transformers are splines",
|
| 117 |
+
"text": "We will first remind readers of the main result in [1 ###reference_b1###] establishing equivalence between neural networks and linear splines.\nEvery neural network is a linear spline, and every linear spline can be represented by a neural network with at most depth.\nCompositions of spline functions are by-and-large uncommon in the literature for reasons mentioned in the beginning \u2014 one usually combines splines by taking sums or linear combinations. Matrix-valued splines also appear to be somewhat of a rarity in the literature. Consequently we are unable to find a reference for what ought to be a fairly standard result about degrees under composition and matrix multiplication, which we state and prove below.\nLet be a spline of degree and a spline of degree . Then is a spline of degree .\nLet and be splines of degrees and . Then , , is a spline of degree .\nWe first assume that , i.e., is a spline of degree . For a degree- spline , we claim that the composition is a spline of degree at most .\nA partition induced by any can be refined to by adding finitely many polynomials. Any spline in is also a spline in . By passing through such refinements, we may assume that are defined over a common partition. So let with\nwhere .\nLet with\nwhere .\nLet and\nAny can be restricted to , giving . Let\nThen can also be restricted to , giving . For any nonempty , we have\nfor where . So . This shows (i) ###reference_i1### for . For general , we may again assume, by passing through a refinement if necessary, that share a common partition, we then apply the same argument coordinatewise.\nWe then deduce (ii) ###reference_i2### from (i) ###reference_i1###, by composing the spline with the polynomial (and therefore spline) function .\n\u220e\nWith the ground work laid in Section 1 ###reference_###, i.e., having the components of a transformer rigorously defined, it becomes relatively straightforward to show that these components are all splines.\nAn attention module is a cubic spline.\nA masked attention module is a cubic spline.\nAn encoder\u2013decoder attention module is a cubic spline.\nAn encoder block is a cubic spline.\nA decoder block is a cubic spline.\nAn encoder\u2013decoder block is a quintic spline.\nA -layer encoder is a spline of degree .\nA -layer decoder is a spline of degree .\nAn encoder\u2013decoder with -layer of encoder blocks and -layer of encoder\u2013decoder blocks is a spline of degree .\nLet be splines of degree . Since , are linear spline, it follows from Lemma 3.2 ###reference_theorem2###(i) ###reference_i1### that and are splines of degree . In the attention module, are linear splines, it follows from Lemma 3.2 ###reference_theorem2###(ii) ###reference_i2### that is a quadratic spline. Hence is also a quadratic splines and is a cubic spline. Similarly, the masked attention and encoder\u2013decoder attention in (13 ###reference_###) are also cubic splines. Note that the encoder\u2013decoder attention is a quadratic spline with respect to the first variable , and a linear spline with respect to the second variable ; but overall it is a cubic spline with respect to .\nA neural network is a linear spline by Theorem 3.1 ###reference_theorem1###. So the encoder block in (6 ###reference_###) and decoder block in (10 ###reference_###) remain cubic splines by Lemma 3.2 ###reference_theorem2###(i) ###reference_i1###. The encoder\u2013decoder block is quadratic in and cubic in , and thus quintic in . 
Since a -layer encoder or decoder is a composition of (masked) attention modules and neural networks, it is a spline of degree . For an encoder\u2013decoder with layers of encoder blocks and layers of encoder\u2013decoder blocks, induction on gives \nas its degree.\n\u220e\nThe splines in (ii) ###reference_i2###, (iii) ###reference_i3###, (v) ###reference_i5###, (viii) ###reference_i8### are autoregressive and those in (vi) ###reference_i6### and (ix) ###reference_i9### partially autoregressive. The term \u201cautoregressive spline\u201d does appear in the literature but it is used in a sense entirely unrelated to (8 ###reference_###). We will have more to say about this in Corollary 3.10 ###reference_theorem10###."
|
| 118 |
+
},
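A numerical sanity check of the cubic-spline claim (our own illustration, not from the paper): restricted to a line t -> X0 + t*D in the input space, each entry of a ReLU-attention output should be piecewise cubic in t, so on a small interval containing no ReLU kink a cubic polynomial fits it exactly.

import numpy as np
rng = np.random.default_rng(2)
d, n, k = 3, 3, 2
Wq, Wk = rng.standard_normal((k, d)), rng.standard_normal((k, d))
bq, bk = rng.standard_normal(k), rng.standard_normal(k)
Wv, bv = rng.standard_normal((d, d)), rng.standard_normal(d)
X0, D = rng.standard_normal((d, n)), rng.standard_normal((d, n))

def f(t):
    # one scalar entry of the ReLU-attention output along the line t -> X0 + t*D
    X = X0 + t * D
    Q, K, V = Wq @ X + bq[:, None], Wk @ X + bk[:, None], Wv @ X + bv[:, None]
    return (V @ np.maximum(K.T @ Q, 0.0))[0, 0]

ts = np.linspace(0.01, 0.05, 5)
coeffs = np.polyfit(ts, [f(t) for t in ts], 3)   # a cubic should fit exactly
resid = max(abs(np.polyval(coeffs, t) - f(t)) for t in np.linspace(0.01, 0.05, 50))
print(resid)   # ~1e-12 when no ReLU kink falls inside the interval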
|
| 119 |
+
{
|
| 120 |
+
"section_id": "3.2",
|
| 121 |
+
"parent_section_id": "3",
|
| 122 |
+
"section_name": "3.2. Veronese map",
|
| 123 |
+
"text": "The degree- Veronese embedding is a well-known map in algebraic geometry [21 ###reference_b21###, pp. 23\u201325] and polynomial optimization [28 ###reference_b28###, pp. 16\u201317]. Informally it is the map that takes variables to the monomials of degree not more than in . This defines an injective smooth function\nThe value gives the number of monomials in variables of degree not more than . Two simple examples: ,\n;\n,\n.\nIn algebraic geometry [21 ###reference_b21###, pp. 23\u201325] the Veronese map is usually defined over projective spaces whereas in polynomial optimization [28 ###reference_b28###, pp. 16\u201317] it is usually defined over affine spaces as in (17 ###reference_###). Nevertheless this is a trivial difference as the former is just a homogenized version of the latter.\nAs is standard in algebraic geometry and polynomial optimization alike, we leave out the domain dependence from the notation to avoid clutter, e.g., the quadratic Veronese map and are both denoted by . This flexibility allows us to compose Veronese maps and speak of for any . For example we may write , using the same notation for two different maps.\nThe Veronese map is also defined over matrix spaces: When applied to matrices, the Veronese map simply treats the coordinates of an matrix as variables. So is given by\nFor example evaluated on gives\nAn important observation for us is the following.\nLet . Then every coordinate of occurs in .\nThis is a consequence of the observation that any monomial of degree not more than can be written as a product of monomials, each with degree not more than .\n\u220e\nAnother result that we will need is the following equivalent formulation of Pierce\u2013Birkhoff conjecture in terms of Veronese map.\nThe Pierce\u2013Birkhoff conjecture holds if and only if for any spline , there exist and a linear spline such that .\nFirstly note that Pierce\u2013Birkhoff conjecture holds for : Any linear spline can be represented in the form where are linear polynomials [36 ###reference_b36###]. Conversely, if can be represented in the form , then it is clearly a linear spline.\nAssuming that Pierce\u2013Birkhoff conjecture holds in general, then any polynomial spline can be written as , which is a linear spline over monomials of , i.e., for some linear spline . Conversely, if every polynomial spline can be written as for some linear spline , then since can always be written as , we have for some linear polynomials \u2019s. Thus we recover the statement of Pierce\u2013Birkhoff conjecture.\n\u220e\nObserve that Lemma 3.5 ###reference_theorem5### applies verbatim to matrix-variate splines , except that would have to be replaced by throughout and we have"
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "3.3",
|
| 127 |
+
"parent_section_id": "3",
|
| 128 |
+
"section_name": "3.3. Splines are transformers",
|
| 129 |
+
"text": "We will show that any matrix-valued spline is an encoder. First we will prove two technical results. We will use to distinguish between indices. We remind the reader that .\nLet be the quadratic Veronese map.\nThere exists a two-layer encoder such that every column of contains a copy of in the form\nMore precisely, there is a -headed attention module\n,\na one-layer neural network,\n,\na -headed attention module\n,\nand another one-layer neural network\n,\nsuch that\nIn particular, any monomial of degree not more than two in the entries of appears in every column of .\nWe will first construct a multihead attention module with the property that each of the columns of contains every entry of , i.e., , , . Fix any and consider the single-head attention module as in (3 ###reference_###) with\nas in (4 ###reference_###). Then the th entry of is exactly and all other entries in the first row are zeros. If we repeat this for all , and stack these attention modules together, we obtain the multihead attention . By construction any column of contains every entry of .\nFor the required , we need to augment so that every column of will also contain the constant .\nConsider the single-head attention module with\nThen the th entry of is , and all other entries in the first row are zeros. We repeat this for all and stack with to obtain the required . Note that has heads.\nBecause , the coordinate function can be represented using a one-layer neural network. So there exists a one-layer neural network that only keeps all first rows of the above attention modules, and gives\nWe will now construct . We first repeat the construction above so that the first heads of will produce all linear monomials (i.e., the entries of ), and the constant. In particular, by the end of our construction, every column of will contain every entry of and , and each of these entries is the only nonzero entry in its row.\nWe then construct the next batch of heads of that will produce all quadratic monomials. Consider the attention module defined by\nThen the th entry of is . In other words we can form quadratic terms in th column out of entries in th column. If we repeat this for all , , and stack these attention modules together, we obtain a multihead attention .\nBy our previous construction, among the rows of are two with only the th column nonzero, taking values and respectively. Composing with , we obtain a row with th entry , and other entries zeros. So the composition contains all quadratic terms of the form in any column, and each of those entries is the only nonzero entry in its row. We may repeat the same argument to obtain a multihead attention with the property that the composition contains all quadratic terms of the form in any column, and each of those entries is the only nonzero entry in its row. The required is then obtained by stacking , , together with the first heads that give the linear monomials and constant.\nThe one-layer neural network is then chosen so that it gives the quadratic monomial\nfor and .\n\u220e\nRecall from Section 1.2 ###reference_### that whenever a neural network takes a matrix input , it is applied columnwise to each column . In general an attention module and a neural network are distinct objects. But there is one special case when they are related.\nLet be a one-layer neural network. 
Then\nis an encoder block of the form\n\nwhere is also a one-layer neural network and is an attention module.\nConsider the attention module given by\nThe first row of has in its th entry and zeros elsewhere. If we stack these attention modules , , together, we obtain an -headed attention module . By construction, contains a submatrix of the form\nwhere is the th column of , .\nLet be the affine map given by\nWe apply columnwise to , extending its domain so that maps every row outside the submatrix in (20 ###reference_###) to zero. Then the submatrix in (20 ###reference_###) is transformed as\nand every other row outside of this submatrix gets mapped to zero. In other words is a left inverse of . The required statement then follows from\n,\nwith and .\n\u220e\nWhile the definition of an encoder as in Section 1.4 ###reference_### does not require the neural networks within it to have only one hidden layer, the original version in [45 ###reference_b45###] does. Lemma 3.7 ###reference_theorem7### shows that this is not really a more stringent requirement since whenever we are presented with a multilayer neural network we may repeatedly apply Lemma 3.7 ###reference_theorem7### to turn it into the form required in [45 ###reference_b45###].\nAssuming the Pierce\u2013Birkhoff conjecture, we may now show that any matrix-valued spline is an encoder. We prove the most general case possible so that other special cases follow effortlessly: the corresponding result for vector-valued splines is obtained by setting and that for scalar-valued splines by setting . Note also that the result below applies to splines defined on any semialgebraic partition \u2014 the most common rectilinear partition obtained through triangulation of the domain is also a special case.\nLet be a max-definable function. Then is a -layer encoder for some finite . More precisely, there exist attention modules and one-layer neural networks \nsuch that\nIf the Pierce\u2013Birkhoff conjecture holds, then any degree- spline is an encoder.\nLet with , . By Lemma 3.5 ###reference_theorem5###, we may write where is a linear spline and the Veronese map of degree . Let . Then by padding with extra terms with zero coefficients, we may assume\nNote that .\nIt follows from Lemma 3.4 ###reference_theorem4### that we may obtain all monomials of degree not more than by composing the quadratic Veronese map with itself sufficiently many times. So by composing copies of the encoder constructed in Lemma 3.6 ###reference_theorem6###, we obtain an encoder\nwith the property that any column of contains a copy of the Veronese map of degree , i.e.,\nThere is a slight abuse of notation in (22 ###reference_###): We have assumed that the th copy of has input dimension , the output dimension of the th copy of . Strictly speaking these are different maps since their domains and codomains are different, although we denote all of them as . Also, in the final layer, we drop any rows that we do not need \u2014 this is not a problem as \u201cdropping rows\u201d is just a modification of the neural network in the last layer , which we will be modifying anyway below.\nExpanding each copy of as in (19 ###reference_###), we obtain the structure in (7 ###reference_###), i.e.,\nfor some . We will modify the attention module in the last layer. For , we let be a (single-head) attention module with\nand is a nonnegative constant matrix to be determined later. The first row of is the first row of , i.e., contains a row of nonnegative constants. 
By stacking heads onto , we obtain a modified attention module with extra heads,\nPrefixing these heads to will allow us to add rows of nonnegative constants to .\nFirst, by modifying the neural network to , one that keeps the first row of each of those extra heads, we see that will have rows of nonnegative constants irrespective of . We may also choose so that these occur as the first through th rows, denoted as\nfor some , . Note that each row of the matrix above comes from one of the added heads .\nBy replacing and in (23 ###reference_###) with and , we obtain an encoder ,\nBy our construction we must have\nDefine the linear spline by\nHere , .\nBy Theorem 3.1 ###reference_theorem1###, linear splines are exactly neural networks. Recall from Section 1.2 ###reference_### that when we apply a neural network to a matrix, we apply it columnwise. Hence\nwhere we have used (21 ###reference_###). Now we set\nfor each .\nLet be the linear map defined by\nwhere .\nThen the composition of has\nas required. At this point we have obtained as an encoder according to the definition in Section 1.4 ###reference_### since is clearly a multilayer neural network. By our remark after the proof of Lemma 3.7 ###reference_theorem7###, it may be converted into an alternating composition of attention modules and single-layer neural networks.\n\u220e\nIn case the reader is wondering, the value of in the proof above is not necessarily and can be strictly larger. To the best of our knowledge, there is not even a conjectural effective version of Conjecture 2.3 ###reference_theorem3### in the literature. So unlike Theorem 3.1 ###reference_theorem1###, any bounds on the number of encoder blocks, the number of heads of attention modules, the width of the neural networks, etc., are beyond reach at this point.\nJust as Theorem 3.1 ###reference_theorem1### establishes the equivalence between -neural networks and linear splines, various parts of the results in this article collectively establish the equivalence between -encoders and splines, assuming the validity of the Pierce\u2013Birkhoff conjecture.\nIf the Pierce\u2013Birkhoff conjecture holds, then the following classes of functions are all equal:\nsplines;\nencoders;\nmax-definable functions;\nlinear splines composed with the Veronese map.\nWhile our article is about understanding transformers in terms of splines, there is a somewhat unexpected payoff: the proof of Theorem 3.8 ###reference_theorem8### yields a way to construct autoregressive splines. There appears to be no universally agreed-upon meaning for the term \u201cautoregressive spline\u201d in the existing literature. In particular, none replicates (8 ###reference_###), and we are unaware of any construction that yields a spline that is autoregressive in the sense of (8 ###reference_###).\nLet and be an autoregressive max-definable function. Then is a -layer decoder for some finite . More precisely, there exist masked attention modules and one-layer neural networks such that\nIf the Pierce\u2013Birkhoff conjecture holds, then any degree- autoregressive spline is a decoder.\nThe proof of Lemma 3.6 ###reference_theorem6### applies almost verbatim. In fact it is slightly simpler since in the th entry, we only need to construct monomials of the form , for . The same constructions used to obtain produce these required monomials when we use masked attention modules in place of attention modules. 
The proofs of Lemma 3.7 ###reference_theorem7### and Theorem 3.8 ###reference_theorem8### then apply with masked attention modules in place of attention modules.\n\u220e\nA similar construction can be extended to construct partially autoregressive splines as encoder\u2013decoders."
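The starting point of the proof of Theorem 3.8 — writing a spline as a linear spline composed with a Veronese map (Lemma 3.5) — can be seen in a toy case. A minimal sketch with our own example f(x) = max(x, 0)^2, a quadratic spline: on the Veronese coordinates (1, x, x^2) it becomes piecewise linear, with the pieces cut out by a condition that is itself linear in those coordinates:

```python
import numpy as np

def veronese2(x):
    # Quadratic Veronese map on one variable: x -> (1, x, x^2).
    return np.array([1.0, x, x * x])

def linear_spline(v):
    # Piecewise linear in the Veronese coordinates v = (1, x, x^2):
    # on the half-space {v[1] >= 0} return the coordinate v[2] (= x^2),
    # and 0 on its complement.  Both pieces are linear functions of v.
    return v[2] if v[1] >= 0.0 else 0.0

# The composition reproduces the quadratic spline f(x) = max(x, 0)^2,
# which is not itself piecewise linear in x.
for x in np.linspace(-2.0, 2.0, 9):
    assert np.isclose(linear_spline(veronese2(x)), max(x, 0.0) ** 2)
print("f = (linear spline) composed with (quadratic Veronese map)")
```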
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"section_id": "4",
|
| 133 |
+
"parent_section_id": null,
|
| 134 |
+
"section_name": "4. Conclusion",
|
| 135 |
+
"text": "It is an old refrain in mathematics that one does not really understand a mathematical proof until one can see how every step is inevitable. This is the level of understanding that we hope Section 3.3 ###reference_### provides for the transformer."
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"section_id": "4.1",
|
| 139 |
+
"parent_section_id": "4",
|
| 140 |
+
"section_name": "4.1. Insights",
|
| 141 |
+
"text": "Arora et al. [1 ###reference_b1###] have shown that neural networks are exactly linear splines. Since compositions of linear splines are again linear splines, to obtain more complex functions we need something in addition to neural networks. Viewed in this manner, the attention module in Section 1.3 ###reference_### is the simplest function that serves the role. Lemma 3.6 ###reference_theorem6### shows that the quadratic Veronese map, arguably the simplest map that is not a linear spline, can be obtained by composing two attention modules. The proof reveals how heads and layers are essential: It would fail if we lacked the flexibility of having multiple heads and layers. The proof also shows how a neural network works hand-in-glove with attention module: It would again fail if we lack either one. The proof of Theorem 3.8 ###reference_theorem8### then builds on Lemma 3.6 ###reference_theorem6###: By composing quadratic Veronese maps we can obtain Veronese map of any higher degree; and by further composing it with linear splines we obtain all possible splines. The resulting map, an alternating composition of attention modules and neural networks, is exactly the encoder of a transformer.\nThere are some other insights worth highlighting. Lemma 3.7 ###reference_theorem7### explains why the neural networks within a transformer require no more than one hidden layer; Vaswani et al. [45 ###reference_b45###] likely arrived at this same conclusion through their experimentation. Theorem 3.3 ###reference_theorem3###(vii) ###reference_i7### shows why layering attention modules and neural networks makes for an effective way to increase model complexity \u2014 the degree of the spline increases exponentially with the number of layers ."
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"section_id": "4.2",
|
| 145 |
+
"parent_section_id": "4",
|
| 146 |
+
"section_name": "4.2. Recommendations",
|
| 147 |
+
"text": "Recent work of Wortsman et al. [48 ###reference_b48###] shows that a -transformer is perfectly capable of achieving results of similar quality as the original -transformer, offering significant computational savings. We also advocate the use of activation, if only for turning a nearly-mystical and sometimes-feared technology into a familiar friendly one. In which case we could drop the word \u201csmoothed\u201d in our title \u2014 attention is a cubic spline.\nIf a smooth function is desired, we argue for using instead of as activation. The function is the natural smooth proxy for as well as the derivative of , also known as the log-sum-exp function, which is in turn the natural smooth proxy for . Indeed has been used in place of to construct smooth neural networks with encouraging results [7 ###reference_b7###]. Despite their intimate relationship, makes for a poor proxy for . On the basis of our work, a -activation would be natural, smooth, and preserves fidelity with splines.\nLastly, Section 3.3 ###reference_### points to the importance of a nearly forgotten seventy-year-old conjecture about splines by one of its pioneers. Indeed, Theorem 3.8 ###reference_theorem8### shows that the Pierce\u2013Birkhoff conjecture is true if and only if every spline is an encoder. Perhaps this article will rekindle interest in the conjecture and point a way towards its resolution."
|
| 148 |
+
}
|
| 149 |
+
],
|
| 150 |
+
"appendix": [],
|
| 151 |
+
"tables": {},
|
| 152 |
+
"image_paths": {},
|
| 153 |
+
"validation": true,
|
| 154 |
+
"references": [
|
| 155 |
+
{
|
| 156 |
+
"1": {
|
| 157 |
+
"title": "Understanding deep neural networks with rectified linear units.",
|
| 158 |
+
"author": "R. Arora, A. Basu, P. Mianjy, and A. Mukherjee.",
|
| 159 |
+
"venue": "In International Conference on Learning Representations, 2018.",
|
| 160 |
+
"url": null
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"2": {
|
| 165 |
+
"title": "Layer normalization.",
|
| 166 |
+
"author": "J. L. Ba, J. R. Kiros, and G. E. Hinton.",
|
| 167 |
+
"venue": "arXiv:1607.06450, 2016.",
|
| 168 |
+
"url": null
|
| 169 |
+
}
|
| 170 |
+
},
|
| 171 |
+
{
|
| 172 |
+
"3": {
|
| 173 |
+
"title": "Transformers as statisticians: Provable in-context learning with\nin-context algorithm selection.",
|
| 174 |
+
"author": "Y. Bai, F. Chen, H. Wang, C. Xiong, and S. Mei.",
|
| 175 |
+
"venue": "In Workshop on Efficient Systems for Foundation Models @\nICML2023, 2023.",
|
| 176 |
+
"url": null
|
| 177 |
+
}
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"4": {
|
| 181 |
+
"title": "Smooth surface interpolation.",
|
| 182 |
+
"author": "G. Birkhoff and H. L. Garabedian.",
|
| 183 |
+
"venue": "J. Math. and Phys., 39:258\u2013268, 1960.",
|
| 184 |
+
"url": null
|
| 185 |
+
}
|
| 186 |
+
},
|
| 187 |
+
{
|
| 188 |
+
"5": {
|
| 189 |
+
"title": "Lattice-ordered rings.",
|
| 190 |
+
"author": "G. Birkhoff and R. S. Pierce.",
|
| 191 |
+
"venue": "An. Acad. Brasil. Ci., 28:41\u201369, 1956.",
|
| 192 |
+
"url": null
|
| 193 |
+
}
|
| 194 |
+
},
|
| 195 |
+
{
|
| 196 |
+
"6": {
|
| 197 |
+
"title": "Language models are few-shot learners.",
|
| 198 |
+
"author": "T. Brown, B. Mann, N. Ryder, M. Subbiah, J. D. Kaplan, P. Dhariwal,\nA. Neelakantan, P. Shyam, G. Sastry, A. Askell, et al.",
|
| 199 |
+
"venue": "volume 33, pages 1877\u20131901, 2020.",
|
| 200 |
+
"url": null
|
| 201 |
+
}
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"7": {
|
| 205 |
+
"title": "Log-sum-exp neural networks and posynomial models for convex and\nlog-log-convex data.",
|
| 206 |
+
"author": "G. C. Calafiore, S. Gaubert, and C. Possieri.",
|
| 207 |
+
"venue": "IEEE Trans. Neural Netw. Learn. Syst., 31(3):827\u2013838, 2020.",
|
| 208 |
+
"url": null
|
| 209 |
+
}
|
| 210 |
+
},
|
| 211 |
+
{
|
| 212 |
+
"8": {
|
| 213 |
+
"title": "Multivariate splines, volume 54 of CBMS-NSF Regional\nConference Series in Applied Mathematics.",
|
| 214 |
+
"author": "C. K. Chui.",
|
| 215 |
+
"venue": "Society for Industrial and Applied Mathematics, Philadelphia, PA,\n1988.",
|
| 216 |
+
"url": null
|
| 217 |
+
}
|
| 218 |
+
},
|
| 219 |
+
{
|
| 220 |
+
"9": {
|
| 221 |
+
"title": "Biorthogonal bases of compactly supported wavelets.",
|
| 222 |
+
"author": "A. Cohen, I. Daubechies, and J.-C. Feauveau.",
|
| 223 |
+
"venue": "Comm. Pure Appl. Math., 45(5):485\u2013560, 1992.",
|
| 224 |
+
"url": null
|
| 225 |
+
}
|
| 226 |
+
},
|
| 227 |
+
{
|
| 228 |
+
"10": {
|
| 229 |
+
"title": "On P\u00f3lya frequency functions. IV. The fundamental spline\nfunctions and their limits.",
|
| 230 |
+
"author": "H. B. Curry and I. J. Schoenberg.",
|
| 231 |
+
"venue": "J. Analyse Math., 17:71\u2013107, 1966.",
|
| 232 |
+
"url": null
|
| 233 |
+
}
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"11": {
|
| 237 |
+
"title": "Approximation by superpositions of a sigmoidal function.",
|
| 238 |
+
"author": "G. Cybenko.",
|
| 239 |
+
"venue": "Math. Control Signals Systems, 2(4):303\u2013314, 1989.",
|
| 240 |
+
"url": null
|
| 241 |
+
}
|
| 242 |
+
},
|
| 243 |
+
{
|
| 244 |
+
"12": {
|
| 245 |
+
"title": "Splines as linear combinations of -splines. A survey.",
|
| 246 |
+
"author": "C. de Boor.",
|
| 247 |
+
"venue": "In Approximation theory, II (Proc. Internat. Sympos.,\nUniv. Texas, Austin, Tex., 1976), pages 1\u201347. Academic Press, New\nYork-London, 1976.",
|
| 248 |
+
"url": null
|
| 249 |
+
}
|
| 250 |
+
},
|
| 251 |
+
{
|
| 252 |
+
"13": {
|
| 253 |
+
"title": "A practical guide to splines, volume 27 of Applied\nMathematical Sciences.",
|
| 254 |
+
"author": "C. de Boor.",
|
| 255 |
+
"venue": "Springer-Verlag, New York-Berlin, 1978.",
|
| 256 |
+
"url": null
|
| 257 |
+
}
|
| 258 |
+
},
|
| 259 |
+
{
|
| 260 |
+
"14": {
|
| 261 |
+
"title": "The way things were in multivariate splines: a personal view.",
|
| 262 |
+
"author": "C. de Boor.",
|
| 263 |
+
"venue": "In Multiscale, nonlinear and adaptive approximation, pages\n19\u201337. Springer, Berlin, 2009.",
|
| 264 |
+
"url": null
|
| 265 |
+
}
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"15": {
|
| 269 |
+
"title": "Box splines, volume 98 of Applied Mathematical Sciences.",
|
| 270 |
+
"author": "C. de Boor, K. H\u00f6llig, and S. Riemenschneider.",
|
| 271 |
+
"venue": "Springer-Verlag, New York, 1993.",
|
| 272 |
+
"url": null
|
| 273 |
+
}
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"16": {
|
| 277 |
+
"title": "BERT: pre-training of deep bidirectional transformers for language\nunderstanding.",
|
| 278 |
+
"author": "J. Devlin, M. Chang, K. Lee, and K. Toutanova.",
|
| 279 |
+
"venue": "In Conference of the North American Chapter of the Association\nfor Computational Linguistics, pages 4171\u20134186. Association for\nComputational Linguistics, 2019.",
|
| 280 |
+
"url": null
|
| 281 |
+
}
|
| 282 |
+
},
|
| 283 |
+
{
|
| 284 |
+
"17": {
|
| 285 |
+
"title": "Bivariate semialgebraic splines.",
|
| 286 |
+
"author": "M. DiPasquale and F. Sottile.",
|
| 287 |
+
"venue": "J. Approx. Theory, 254:105392, 19, 2020.",
|
| 288 |
+
"url": null
|
| 289 |
+
}
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"18": {
|
| 293 |
+
"title": "Semialgebraic splines.",
|
| 294 |
+
"author": "M. DiPasquale, F. Sottile, and L. Sun.",
|
| 295 |
+
"venue": "Comput. Aided Geom. Design, 55:29\u201347, 2017.",
|
| 296 |
+
"url": null
|
| 297 |
+
}
|
| 298 |
+
},
|
| 299 |
+
{
|
| 300 |
+
"19": {
|
| 301 |
+
"title": "An image is worth words: Transformers for image\nrecognition at scale.",
|
| 302 |
+
"author": "A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai,\nT. Unterthiner, M. Dehghani, M. Minderer, G. Heigold, S. Gelly, J. Uszkoreit,\nand N. Houlsby.",
|
| 303 |
+
"venue": "In International Conference on Learning Representations, 2021.",
|
| 304 |
+
"url": null
|
| 305 |
+
}
|
| 306 |
+
},
|
| 307 |
+
{
|
| 308 |
+
"20": {
|
| 309 |
+
"title": "Handbook of computer aided geometric design.",
|
| 310 |
+
"author": "G. Farin, J. Hoschek, and M.-S. Kim, editors.",
|
| 311 |
+
"venue": "North-Holland, Amsterdam, 2002.",
|
| 312 |
+
"url": null
|
| 313 |
+
}
|
| 314 |
+
},
|
| 315 |
+
{
|
| 316 |
+
"21": {
|
| 317 |
+
"title": "Algebraic geometry, volume 133 of Graduate Texts in\nMathematics.",
|
| 318 |
+
"author": "J. Harris.",
|
| 319 |
+
"venue": "Springer-Verlag, New York, 1995.",
|
| 320 |
+
"url": null
|
| 321 |
+
}
|
| 322 |
+
},
|
| 323 |
+
{
|
| 324 |
+
"22": {
|
| 325 |
+
"title": "Deep transformers without shortcuts: Modifying self-attention for\nfaithful signal propagation.",
|
| 326 |
+
"author": "B. He, J. Martens, G. Zhang, A. Botev, A. Brock, S. L. Smith, and Y. W. Teh.",
|
| 327 |
+
"venue": "In International Conference on Learning Representations, 2023.",
|
| 328 |
+
"url": null
|
| 329 |
+
}
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"23": {
|
| 333 |
+
"title": "Deep residual learning for image recognition.",
|
| 334 |
+
"author": "K. He, X. Zhang, S. Ren, and J. Sun.",
|
| 335 |
+
"venue": "In IEEE Conference on Computer Vision and Pattern Recognition,\npages 770\u2013778, 2016.",
|
| 336 |
+
"url": null
|
| 337 |
+
}
|
| 338 |
+
},
|
| 339 |
+
{
|
| 340 |
+
"24": {
|
| 341 |
+
"title": "Lattice-ordered rings and function rings.",
|
| 342 |
+
"author": "M. Henriksen and J. R. Isbell.",
|
| 343 |
+
"venue": "Pacific J. Math., 12:533\u2013565, 1962.",
|
| 344 |
+
"url": null
|
| 345 |
+
}
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"25": {
|
| 349 |
+
"title": "Universal approximation with deep narrow networks.",
|
| 350 |
+
"author": "P. Kidger and T. Lyons.",
|
| 351 |
+
"venue": "In Conference on Learning Theory, pages 2306\u20132327, 2020.",
|
| 352 |
+
"url": null
|
| 353 |
+
}
|
| 354 |
+
},
|
| 355 |
+
{
|
| 356 |
+
"26": {
|
| 357 |
+
"title": "The Metafont book.",
|
| 358 |
+
"author": "D. E. Knuth.",
|
| 359 |
+
"venue": "Addison-Wesley, Boston, MA, 1989.",
|
| 360 |
+
"url": null
|
| 361 |
+
}
|
| 362 |
+
},
|
| 363 |
+
{
|
| 364 |
+
"27": {
|
| 365 |
+
"title": "Sima: Simple softmax-free attention for vision transformers.",
|
| 366 |
+
"author": "S. A. Koohpayegani and H. Pirsiavash.",
|
| 367 |
+
"venue": "In IEEE/CVF Winter Conference on Applications of Computer\nVision, pages 2607\u20132617, 2024.",
|
| 368 |
+
"url": null
|
| 369 |
+
}
|
| 370 |
+
},
|
| 371 |
+
{
|
| 372 |
+
"28": {
|
| 373 |
+
"title": "An introduction to polynomial and semi-algebraic optimization.",
|
| 374 |
+
"author": "J. B. Lasserre.",
|
| 375 |
+
"venue": "Cambridge Texts in Applied Mathematics. Cambridge University Press,\nCambridge, 2015.",
|
| 376 |
+
"url": null
|
| 377 |
+
}
|
| 378 |
+
},
|
| 379 |
+
{
|
| 380 |
+
"29": {
|
| 381 |
+
"title": "Tensors in computations.",
|
| 382 |
+
"author": "L.-H. Lim.",
|
| 383 |
+
"venue": "Acta Numer., 30:555\u2013764, 2021.",
|
| 384 |
+
"url": null
|
| 385 |
+
}
|
| 386 |
+
},
|
| 387 |
+
{
|
| 388 |
+
"30": {
|
| 389 |
+
"title": "SOFT: softmax-free transformer with linear complexity.",
|
| 390 |
+
"author": "J. Lu, J. Yao, J. Zhang, X. Zhu, H. Xu, W. Gao, C. XU, T. Xiang, and L. Zhang.",
|
| 391 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 34, pages 21297\u201321309, 2021.",
|
| 392 |
+
"url": null
|
| 393 |
+
}
|
| 394 |
+
},
|
| 395 |
+
{
|
| 396 |
+
"31": {
|
| 397 |
+
"title": "On the Pierce-Birkhoff conjecture.",
|
| 398 |
+
"author": "F. Lucas, D. Schaub, and M. Spivakovsky.",
|
| 399 |
+
"venue": "J. Algebra, 435:124\u2013158, 2015.",
|
| 400 |
+
"url": null
|
| 401 |
+
}
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"32": {
|
| 405 |
+
"title": "Introduction to tropical geometry, volume 161 of Graduate\nStudies in Mathematics.",
|
| 406 |
+
"author": "D. Maclagan and B. Sturmfels.",
|
| 407 |
+
"venue": "American Mathematical Society, Providence, RI, 2015.",
|
| 408 |
+
"url": null
|
| 409 |
+
}
|
| 410 |
+
},
|
| 411 |
+
{
|
| 412 |
+
"33": {
|
| 413 |
+
"title": "On the Pierce-Birkhoff conjecture.",
|
| 414 |
+
"author": "L. Mah\u00e9.",
|
| 415 |
+
"venue": "Rocky Mountain J. Math., 14(4):983\u2013985, 1984.",
|
| 416 |
+
"url": null
|
| 417 |
+
}
|
| 418 |
+
},
|
| 419 |
+
{
|
| 420 |
+
"34": {
|
| 421 |
+
"title": "The Pierce-Birkhoff conjecture for curves.",
|
| 422 |
+
"author": "M. Marshall.",
|
| 423 |
+
"venue": "Canad. J. Math., 44(6):1262\u20131271, 1992.",
|
| 424 |
+
"url": null
|
| 425 |
+
}
|
| 426 |
+
},
|
| 427 |
+
{
|
| 428 |
+
"35": {
|
| 429 |
+
"title": "Handbook of splines, volume 462 of Mathematics and its\nApplications.",
|
| 430 |
+
"author": "G. Micula and S. Micula.",
|
| 431 |
+
"venue": "Kluwer Academic Publishers, Dordrecht, 1999.",
|
| 432 |
+
"url": null
|
| 433 |
+
}
|
| 434 |
+
},
|
| 435 |
+
{
|
| 436 |
+
"36": {
|
| 437 |
+
"title": "Max-min representation of piecewise linear functions.",
|
| 438 |
+
"author": "S. Ovchinnikov.",
|
| 439 |
+
"venue": "Beitr\u00e4ge Algebra Geom., 43(1):297\u2013302, 2002.",
|
| 440 |
+
"url": null
|
| 441 |
+
}
|
| 442 |
+
},
|
| 443 |
+
{
|
| 444 |
+
"37": {
|
| 445 |
+
"title": "Sparse sequence-to-sequence models.",
|
| 446 |
+
"author": "B. Peters, V. Niculae, and A. F. Martins.",
|
| 447 |
+
"venue": "In Annual Meeting of the Association for Computational\nLinguistics, pages 1504\u20131519, 2019.",
|
| 448 |
+
"url": null
|
| 449 |
+
}
|
| 450 |
+
},
|
| 451 |
+
{
|
| 452 |
+
"38": {
|
| 453 |
+
"title": "Multi-modal fusion transformer for end-to-end autonomous driving.",
|
| 454 |
+
"author": "A. Prakash, K. Chitta, and A. Geiger.",
|
| 455 |
+
"venue": "In IEEE/CVF Conference on Computer Vision and Pattern\nRecognition, pages 7077\u20137087, 2021.",
|
| 456 |
+
"url": null
|
| 457 |
+
}
|
| 458 |
+
},
|
| 459 |
+
{
|
| 460 |
+
"39": {
|
| 461 |
+
"title": "cosFormer: Rethinking softmax in attention.",
|
| 462 |
+
"author": "Z. Qin, W. Sun, H. Deng, D. Li, Y. Wei, B. Lv, J. Yan, L. Kong, and Y. Zhong.",
|
| 463 |
+
"venue": "In International Conference on Learning Representations, 2021.",
|
| 464 |
+
"url": null
|
| 465 |
+
}
|
| 466 |
+
},
|
| 467 |
+
{
|
| 468 |
+
"40": {
|
| 469 |
+
"title": "The perceptron: A probabilistic model for information storage and\norganization in the brain.",
|
| 470 |
+
"author": "F. Rosenblatt.",
|
| 471 |
+
"venue": "Psychol. Rev., 65:386\u2013408, 1958.",
|
| 472 |
+
"url": null
|
| 473 |
+
}
|
| 474 |
+
},
|
| 475 |
+
{
|
| 476 |
+
"41": {
|
| 477 |
+
"title": "Contributions to the problem of approximation of equidistant data by\nanalytic functions. Part A. On the problem of smoothing or graduation.\nA first class of analytic approximation formulae.",
|
| 478 |
+
"author": "I. J. Schoenberg.",
|
| 479 |
+
"venue": "Quart. Appl. Math., 4:45\u201399, 1946.",
|
| 480 |
+
"url": null
|
| 481 |
+
}
|
| 482 |
+
},
|
| 483 |
+
{
|
| 484 |
+
"42": {
|
| 485 |
+
"title": "Spline functions and the problem of graduation.",
|
| 486 |
+
"author": "I. J. Schoenberg.",
|
| 487 |
+
"venue": "Proc. Nat. Acad. Sci. U.S.A., 52:947\u2013950, 1964.",
|
| 488 |
+
"url": null
|
| 489 |
+
}
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"43": {
|
| 493 |
+
"title": "A note on intrinsic supersmoothness of bivariate semialgebraic\nsplines.",
|
| 494 |
+
"author": "B. Shekhtman and T. Sorokina.",
|
| 495 |
+
"venue": "Comput. Aided Geom. Design, 98:Paper No. 102137, 5, 2022.",
|
| 496 |
+
"url": null
|
| 497 |
+
}
|
| 498 |
+
},
|
| 499 |
+
{
|
| 500 |
+
"44": {
|
| 501 |
+
"title": "Handbook on splines for the user.",
|
| 502 |
+
"author": "E. V. Shikin and A. I. Plis.",
|
| 503 |
+
"venue": "CRC Press, Boca Raton, FL, 1995.",
|
| 504 |
+
"url": null
|
| 505 |
+
}
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"45": {
|
| 509 |
+
"title": "Attention is all you need.",
|
| 510 |
+
"author": "A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez,\n\u0141. Kaiser, and I. Polosukhin.",
|
| 511 |
+
"venue": "In Advances in Neural Information Processing Systems,\nvolume 30, 2017.",
|
| 512 |
+
"url": null
|
| 513 |
+
}
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"46": {
|
| 517 |
+
"title": "On the Pierce-Birkhoff conjecture for smooth affine surfaces over\nreal closed fields.",
|
| 518 |
+
"author": "S. Wagner.",
|
| 519 |
+
"venue": "Ann. Fac. Sci. Toulouse Math. (6), 19:221\u2013242, 2010.",
|
| 520 |
+
"url": null
|
| 521 |
+
}
|
| 522 |
+
},
|
| 523 |
+
{
|
| 524 |
+
"47": {
|
| 525 |
+
"title": "Spline models for observational data, volume 59 of CBMS-NSF Regional Conference Series in Applied Mathematics.",
|
| 526 |
+
"author": "G. Wahba.",
|
| 527 |
+
"venue": "Society for Industrial and Applied Mathematics, Philadelphia, PA,\n1990.",
|
| 528 |
+
"url": null
|
| 529 |
+
}
|
| 530 |
+
},
|
| 531 |
+
{
|
| 532 |
+
"48": {
|
| 533 |
+
"title": "Replacing softmax with ReLU in vision transformers.",
|
| 534 |
+
"author": "M. Wortsman, J. Lee, J. Gilmer, and S. Kornblith.",
|
| 535 |
+
"venue": "arXiv:2309.08586, 2023.",
|
| 536 |
+
"url": null
|
| 537 |
+
}
|
| 538 |
+
},
|
| 539 |
+
{
|
| 540 |
+
"49": {
|
| 541 |
+
"title": "Garrett Birkhoff and applied mathematics.",
|
| 542 |
+
"author": "D. M. Young.",
|
| 543 |
+
"venue": "Notices Amer. Math. Soc., 44(11):1446\u20131450, 1997.",
|
| 544 |
+
"url": null
|
| 545 |
+
}
|
| 546 |
+
},
|
| 547 |
+
{
|
| 548 |
+
"50": {
|
| 549 |
+
"title": "RT-2: Vision-language-action models transfer web knowledge to\nrobotic control.",
|
| 550 |
+
"author": "B. Zitkovich, T. Yu, S. Xu, P. Xu, T. Xiao, F. Xia, J. Wu, P. Wohlhart,\nS. Welker, A. Wahid, Q. Vuong, V. Vanhoucke, H. Tran, R. Soricut, A. Singh,\nJ. Singh, P. Sermanet, P. R. Sanketi, G. Salazar, M. S. Ryoo, K. Reymann,\nK. Rao, K. Pertsch, I. Mordatch, H. Michalewski, Y. Lu, S. Levine, L. Lee,\nT.-W. E. Lee, I. Leal, Y. Kuang, D. Kalashnikov, R. Julian, N. J. Joshi,\nA. Irpan, B. Ichter, J. Hsu, A. Herzog, K. Hausman, K. Gopalakrishnan, C. Fu,\nP. Florence, C. Finn, K. A. Dubey, D. Driess, T. Ding, K. M. Choromanski,\nX. Chen, Y. Chebotar, J. Carbajal, N. Brown, A. Brohan, M. G. Arenas, and\nK. Han.",
|
| 551 |
+
"venue": "In Conference on Robot Learning, volume 229, pages 2165\u20132183,\n2023.",
|
| 552 |
+
"url": null
|
| 553 |
+
}
|
| 554 |
+
}
|
| 555 |
+
],
|
| 556 |
+
"url": "http://arxiv.org/html/2408.09624v1"
|
| 557 |
+
}
|
20240819/2408.09657v1.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20240819/2408.09702v1.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20240819/2408.09722v1.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20240819/2408.09727v1.json
ADDED
|
@@ -0,0 +1,102 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Quantitative 3D Map Accuracy Evaluation Hardware and Algorithm for LiDAR(-Inertial) SLAM",
|
| 3 |
+
"abstract": "Accuracy evaluation of a 3D pointcloud map is crucial for the development of autonomous driving systems.\nIn this work, we propose a user-independent software/hardware system that can quantitatively evaluate the accuracy of a 3D pointcloud map acquired from LiDAR(-Inertial) SLAM.\nWe introduce a LiDAR target that functions robustly in the outdoor environment, while remaining observable by LiDAR.\nWe also propose a software algorithm that automatically extracts representative points and calculates the accuracy of the 3D pointcloud map by leveraging GPS position data.\nThis methodology overcomes the limitations of the manual selection method, that its result varies between users.\nFurthermore, two different error metrics, relative and absolute errors, are introduced to analyze the accuracy from different perspectives.\nOur implementations are available at:\nhttps://github.com/SangwooJung98/3D_Map_Evaluation",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction and Related Works",
|
| 9 |
+
"text": "LiDAR (LiDAR) is one of the representative sensors that provide 3D points containing accurate information around it.\nDue to its robustness of light conditions and convenient pointcloud data generation, LiDAR is widely exploited in SLAM (SLAM) research [1 ###reference_b1###, 2 ###reference_b2###, 3 ###reference_b3###, 4 ###reference_b4###, 5 ###reference_b5###].\nWith the development of LiDAR SLAM, the importance of an accurate 3D pointcloud map is rising as it can be exploited in various fields such as autonomous car driving [6 ###reference_b6###, 7 ###reference_b7###], high-resolution map [8 ###reference_b8###], and long-term map management [9 ###reference_b9###].\nFollowing the increase of 3D pointcloud map utilization, evaluating the accuracy of each map has also become important.\nThe accuracy of a 3D pointcloud map generated by LiDAR SLAM is generally approached indirectly by the accuracy of odometry, which is another output of LiDAR SLAM. [10 ###reference_b10###]\nTraditionally, the direct calculation of the accuracy of a 3D pointcloud map is performed by placing a small object at a specific location and selecting a single point that represents the object from the 3D map by human hand.\nHowever, due to the characteristic of LiDAR that the pointcloud density decreases proportional to the distance from the sensor, selection of the representative point may vary depending on the user.\nTo overcome the problems mentioned above, we propose a target base 3D pointcloud map accuracy measurement algorithm.\nResearch on targets for LiDAR [11 ###reference_b11###, 12 ###reference_b12###, 13 ###reference_b13###] has been done while their design purpose was mostly the extrinsic calibration between LiDAR and cameras.\nUnlike existing LiDAR targets, the proposed target is robust in outdoor environments while remaining detectable by LiDAR.\nFurthermore, by exploiting K-means clustering [14 ###reference_b14###], RANSAC (RANSAC) [15 ###reference_b15###], and singular value decomposition to the target pointcloud, the target position can be extracted consistently without depending on the user.\nWith the calculated target pose, we propose two different error metrics (relative and absolute errors) for measuring the accuracy of 3D pointcloud maps.\nAn overview of the two error metrics is illustrated in Fig. 1 ###reference_###, while the major contributions of the work are as follows:\n###figure_1### We introduce the hardware design of the LiDAR target, which is robust in the outdoor environment, and a software algorithm that calculates the position of each LiDAR target included in the 3D pointcloud map.\nWe suggest two different error metrics (relative and absolute error) that can be exploited for analyzing the accuracy of the 3D pointcloud map from different perspectives.\nWe release our evaluation algorithm and hardware design to the public.\n###figure_2### ###figure_3### ###figure_4###"
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Hardware Design and Target Pose Estimation",
|
| 15 |
+
"text": "Due to the random wind, dust, and other unpredictable factors of the outdoor environment, the target should be 1) robust to wind, 2) GPS attachable, and 3) easy to build.\nUsing the 3030 aluminum extrusion and cross-shaped meat grills, we built an outdoor robust LiDAR target, as shown in Fig. 3 ###reference_###.\nThe length of each edge is 0.6, and the ground truth target pose can be acquired using the GPS sensor (Sokkia GRX3) attached to the top of it.\nFig. 2 ###reference_### illustrates the overall pipeline of the full algorithm.\nAs an output of LiDAR SLAM framework, a global 3D pointcloud map is acquired.\nBy cropping a sphere with a 5 radius, centered on the GPS target pose obtained from the GPS sensor attached to the target, a pointcloud that includes the target can be cropped from the full 3D map.\nThis process is defined as loose cropping, as the resulting pointcloud includes both the target and surrounding ground points.\nAlternatively, the loose cropping process can be achieved by cropping the target from the full 3D map manually without GPS.\nFollowing loose cropping, the ground points of the loosely cropped pointcloud can be removed manually.\nThis process is defined as tight cropping, as the resulting pointcloud mostly consists of the target itself with only a small number of outliers.\nAlthough the tight cropping procedure is done manually, ground removal from the loosely cropped pointcloud is much easier than selecting a single point that can represent the target from the full 3D map.\nAs a result, the tight cropping result is more consistent among different users compared to the previous methods.\nThe cropping process is displayed in Fig. 4 ###reference_###.\n###figure_5### ###figure_6### ###figure_7### ###figure_8### ###figure_9### ###figure_10### After extracting the tightly cropped pointcloud from the 3D pointcloud map, the calculation of its pose is achieved through the following three steps.\nFirst, K-means clustering with is applied to the tightly cropped pointcloud, dividing it into two groups of points, . Each cluster represents one plate of the target.\nNext, RANSAC is applied to the pointcloud of each plate, , in order to remove the outliers.\nThe fitting function of RANSAC is a plane function based on random 3 points, and the inlier threshold is 0.03.\nAfter RANSAC, inlier pointclouds that represent each plate, remain.\nFinally, SVD is applied to each inlier pointcloud, , calculating the optimal plane function, .\nAn example of data processing at each step is displayed in Fig. 
5 ###reference_###.\nBy the target design, the two planes should be perpendicular to each other, while each plane itself is perpendicular to the ground.\nOnly plane functions that satisfy the perpendicularity conditions within a threshold of 1\u00b0 proceed to the target pose calculation, while those that do not meet the threshold are redirected back to the K-means clustering process.\nFrom the functions of , the intersection line is calculated as the cross product of the normal vectors of and .\nIf the target is perpendicular to the ground, the estimated target pose is ideally defined by the intersection point between and the ground plane.\nHowever, inevitable errors exist in the angle between the intersection line and the ground plane.\nFurthermore, the target is not exactly located at ; the target has its own height along the axis.\nTherefore, the point on corresponding to the average height of and is utilized as the estimated target pose.\nTo enhance the robustness of our algorithm, we calculated 100 sample poses for each target, averaging these to obtain the final estimated target pose.\n###figure_11###"
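A compact sketch of the three-step pose estimation described above; `fit_plane_svd`, `ransac_plane`, and `target_pose` are our own simplified stand-ins for the released implementation (using scikit-learn's KMeans), keeping the 0.03 m inlier threshold from the text:

```python
import numpy as np

def fit_plane_svd(pts):
    # Least-squares plane through pts: the normal is the right singular
    # vector of the centered points with the smallest singular value.
    c = pts.mean(axis=0)
    _, _, vt = np.linalg.svd(pts - c)
    n = vt[-1]
    return n / np.linalg.norm(n), c           # unit normal, point on plane

def ransac_plane(pts, thresh=0.03, iters=200, rng=np.random.default_rng(0)):
    # Keep the plane hypothesis (from 3 random points) with most inliers.
    best = None
    for _ in range(iters):
        n, c = fit_plane_svd(pts[rng.choice(len(pts), 3, replace=False)])
        inliers = np.abs((pts - c) @ n) < thresh   # point-to-plane distance
        if best is None or inliers.sum() > best.sum():
            best = inliers
    return pts[best]

def target_pose(cropped_pts):
    # Split the tightly cropped cloud into the two plates, fit each plane,
    # then intersect: the line direction is the cross product of normals.
    from sklearn.cluster import KMeans
    labels = KMeans(n_clusters=2, n_init=10).fit_predict(cropped_pts)
    (n1, c1), (n2, c2) = [fit_plane_svd(ransac_plane(cropped_pts[labels == i]))
                          for i in (0, 1)]
    d = np.cross(n1, n2)                       # intersection-line direction
    # One point on the line: n1.x = n1.c1, n2.x = n2.c2, d.x = 0.
    p = np.linalg.solve(np.stack([n1, n2, d]),
                        np.array([n1 @ c1, n2 @ c2, 0.0]))
    z = 0.5 * (cropped_pts[labels == 0][:, 2].mean()
               + cropped_pts[labels == 1][:, 2].mean())
    # Walk along the line to the plates' average height (assumes the line
    # is near-vertical, i.e. d[2] != 0, as for two roughly vertical plates).
    return p + ((z - p[2]) / d[2]) * d
```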
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Relative and Absolute Error Metric",
|
| 21 |
+
"text": "Based on the estimated target pose and GPS-based ground truth pose, we propose two different error metrics that represent the accuracy of a 3D pointcloud map in various aspects.\nBefore calculating the accuracy of the map, the estimated target pose and ground truth pose has to be aligned since the frame of the acquired 3D map may not match the frame of the ground truth pose.\nWe exploited a 2D image registration method based on the argmin function to minimize the sum of distances between the corresponding estimated target pose and GPS target pose.\nThis is achieved by translating and rotating the map frame optimally.\nThe 2D transformation matrix and 2D translation vector is obtained by the following Eq. (1 ###reference_###):\nwhere N is the number of targets, and denote the estimated target pose and GPS target pose respectively.\nAfter registering the estimated target pose to GPS target pose, map accuracy is calculated based on two different metrics: Relative error and Absolute error.\nRelative error is calculated by averaging the difference of distance between the estimated target positions with the GPS-based distance between the same combination of targets.\nFor the relative error calculation of n targets, distance error is calculated for all combinations of targets and the average is defined as the relative error.\nIn this study, 5 targets were exploited and the relative error was calculated by averaging 10 different combinations.\nSince relative error is calculated based on the distance error between the targets, it focuses on evaluating the general accuracy of the map.\nAbsolute error is the average of the distances between the estimated target position and its corresponding GPS-based target position.\nAs absolute error is calculated based on the target position error itself, the absolute error metric focuses more on the map accuracy of the local area where the targets are positioned.\nFurthermore, high standard deviation of the absolute errors indicate that the accuracy of the 3D map varies across different locations of the map."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "4",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "Experiments and Results",
|
| 27 |
+
"text": "We tested the proposed algorithm based on a real-world dataset acquired from a highway construction site.\nThe 3D pointcloud map was generated by the LiDAR SLAM algorithm LIO-SAM[1 ###reference_b1###], while Velodyne VLP-32C LiDAR, MicroStrain 3DM-GX5-25 9 DoF IMU, and NovAtel CPT7 GPS were exploited as the hardware.\nThe experiment environment and hardware position are displayed in Fig. 6 ###reference_###.\nWith consistent target and environment settings, we acquired 5 sequences and calculated the relative and absolute errors for each sequence.\nThe experiment results are displayed in Table. 1 ###reference_### and Table. 2 ###reference_###.\nThe experiment results show that relative error and absolute error have similar tendencies following the sequences.\nSequences with smaller relative error show small absolute error, and vice versa.\nThis indicates that the general map error of the 3D map is highly related to the local map error, suggesting that one error metric includes information of the other error metric indirectly.\nFurthermore, as presented in Table. 1 ###reference_###, there is a tendency for the relative error to increase as the distance between targets increases.\nDue to this effect, relative error may show higher values for larger maps.\nOn the other hand, absolute error compares the position of a single target based on GPS target pose and estimated target pose, providing map-size-independent accuracy information.\nFor absolute error, the results displayed in Table. 2 ###reference_### indicate that the error from targets 2, 3, and 4 tends to be lower than those from targets 1 and 5.\nThis is due to the robot\u2019s trajectory which makes an ellipsoid around the targets, visiting targets 1 and 5 only once while other targets are visited twice.\nIt is challenging to keep the factors such as the distance from the robot to each target and the number of visits per target constant across different sequences.\nIn order to remove these effects and measure the accuracy of the 3D pointcloud map robustly, the relative error metric may be exploited."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "5",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Conclusion",
|
| 33 |
+
"text": "In this work, we propose hardware and a software algorithm that can be exploited to measure the accuracy of a 3D pointcloud map while maintaining the robustness in outdoor environments and LiDAR sparsity.\nFurthermore, we introduce two different error metrics, Relative error and Absolute error, that represent general map accuracy and local map accuracy.\nThrough the real-world experiment, we demonstrated the relationship between two error metrics and the robustness of our hardware and algorithm.\nThis approach offers insights into the local and global quality of the 3D pointcloud map, enhancing evaluation standards for LiDAR(-Inertial) SLAM algorithms.\nTo overcome the current limitations of this research, we plan to develop a fully automated software that operates without any manual intervention."
|
| 34 |
+
}
|
| 35 |
+
],
|
| 36 |
+
"appendix": [],
|
| 37 |
+
"tables": {
|
| 38 |
+
"1": {
|
| 39 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S2.T1\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Relative error experiment results. The relative error() and standard deviation() of each sequence is written in <span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.8.1\">bold</span>. t1 & t2 indicates the error in the distance between target 1 and target 2.</figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S2.T1.6.2\" style=\"width:455.3pt;height:83.3pt;vertical-align:-0.8pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-70.5pt,12.8pt) scale(0.763527028212051,0.763527028212051) ;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S2.T1.6.2.2\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S2.T1.6.2.2.2\">\n<td class=\"ltx_td ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T1.6.2.2.2.3\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T1.5.1.1.1.1\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">\n (m)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T1.6.2.2.2.2\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">\n (m)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T1.6.2.2.2.4\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">t1 & t2</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T1.6.2.2.2.5\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">t1 & t3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T1.6.2.2.2.6\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">t1 & t4</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T1.6.2.2.2.7\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">t1 & t5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T1.6.2.2.2.8\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">t2 & t3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T1.6.2.2.2.9\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">t2 & t4</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T1.6.2.2.2.10\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">t2 & t5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T1.6.2.2.2.11\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">t3 & t4</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T1.6.2.2.2.12\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">t3 & t5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt ltx_border_t\" id=\"S2.T1.6.2.2.2.13\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">t4 & t5</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.6.2.2.3.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.1\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">Sequence 1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.2\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.6.2.2.3.1.2.1\">0.0754</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.3\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text 
ltx_font_bold\" id=\"S2.T1.6.2.2.3.1.3.1\">0.0413</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.4\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0197</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.5\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0826</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.6\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0701</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.7\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.1271</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.8\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.1021</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.9\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0902</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.10\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.1470</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.11\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0140</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.12\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0440</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T1.6.2.2.3.1.13\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0569</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.6.2.2.4.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.4.2.1\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">Sequence 2</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.4.2.2\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.6.2.2.4.2.2.1\">0.0647</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.4.2.3\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.6.2.2.4.2.3.1\">0.0352</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.4.2.4\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0133</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.4.2.5\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0697</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.4.2.6\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0546</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.4.2.7\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.1136</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.4.2.8\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0836</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.4.2.9\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0680</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.4.2.10\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.1269</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.4.2.11\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0141</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" 
id=\"S2.T1.6.2.2.4.2.12\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0445</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T1.6.2.2.4.2.13\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0589</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.6.2.2.5.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.1\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">Sequence 3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.2\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.6.2.2.5.3.2.1\">0.0499</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.3\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.6.2.2.5.3.3.1\">0.0315</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.4\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0257</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.5\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0748</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.6\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0903</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.7\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0932</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.8\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0488</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.9\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0650</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.10\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0680</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.11\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0132</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.12\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0167</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T1.6.2.2.5.3.13\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0030</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.6.2.2.6.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.1\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">Sequence 4</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.2\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.6.2.2.6.4.2.1\">0.0532</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.3\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.6.2.2.6.4.3.1\">0.0290</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.4\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0056</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.5\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0455</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.6\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0708</td>\n<td class=\"ltx_td 
ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.7\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0912</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.8\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0521</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.9\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0765</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.10\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0971</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.11\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0265</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.12\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0461</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T1.6.2.2.6.4.13\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0206</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.6.2.2.7.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.1\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">Sequence 5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.2\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.6.2.2.7.5.2.1\">0.0633</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.3\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.6.2.2.7.5.3.1\">0.0431</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.4\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0095</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.5\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0796</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.6\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0988</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.7\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0990</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.8\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0900</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.9\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.1089</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.10\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.1096</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.11\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0187</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.12\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0179</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_tt\" id=\"S2.T1.6.2.2.7.5.13\" 
style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0008</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 40 |
+
"capture": "Table 1: Relative error experiment results. The relative error() and standard deviation() of each sequence is written in bold. t1 & t2 indicates the error in the distance between target 1 and target 2."
|
| 41 |
+
},
|
| 42 |
+
"2": {
|
| 43 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S2.T2\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Absolute error experiment results. The absolute error() and standard deviation() of each sequence is written in <span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.8.1\">bold</span>.</figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S2.T2.6.2\" style=\"width:216.8pt;height:63pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-77.4pt,22.5pt) scale(0.58341533693276,0.58341533693276) ;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S2.T2.6.2.2\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S2.T2.6.2.2.2\">\n<td class=\"ltx_td ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T2.6.2.2.2.3\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T2.5.1.1.1.1\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">\n(m)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T2.6.2.2.2.2\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">\n (m)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T2.6.2.2.2.4\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">target1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T2.6.2.2.2.5\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">target2</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T2.6.2.2.2.6\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">target3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt ltx_border_t\" id=\"S2.T2.6.2.2.2.7\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">target4</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt ltx_border_t\" id=\"S2.T2.6.2.2.2.8\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">target5</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.6.2.2.3.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.3.1.1\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">Sequence 1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.3.1.2\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.6.2.2.3.1.2.1\">0.0513</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.3.1.3\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.6.2.2.3.1.3.1\">0.0204</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.3.1.4\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0523</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.3.1.5\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0720</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.3.1.6\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0337</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.3.1.7\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0236</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T2.6.2.2.3.1.8\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0752</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.6.2.2.4.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_r 
ltx_border_tt\" id=\"S2.T2.6.2.2.4.2.1\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">Sequence 2</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.4.2.2\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.6.2.2.4.2.2.1\">0.0420</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.4.2.3\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.6.2.2.4.2.3.1\">0.0211</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.4.2.4\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0450</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.4.2.5\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0584</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.4.2.6\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0282</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.4.2.7\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0099</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T2.6.2.2.4.2.8\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0688</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.6.2.2.5.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.5.3.1\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">Sequence 3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.5.3.2\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.6.2.2.5.3.2.1\">0.0398</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.5.3.3\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.6.2.2.5.3.3.1\">0.0098</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.5.3.4\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0589</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.5.3.5\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0316</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.5.3.6\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0361</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.5.3.7\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0336</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T2.6.2.2.5.3.8\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0385</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.6.2.2.6.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.6.4.1\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">Sequence 4</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.6.4.2\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.6.2.2.6.4.2.1\">0.0370</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.6.4.3\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.6.2.2.6.4.3.1\">0.0130</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.6.4.4\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0403</td>\n<td 
class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.6.4.5\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0467</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.6.4.6\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0140</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.6.4.7\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0327</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T2.6.2.2.6.4.8\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0511</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.6.2.2.7.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.7.5.1\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">Sequence 5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.7.5.2\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.6.2.2.7.5.2.1\">0.0516</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.7.5.3\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.6.2.2.7.5.3.1\">0.0110</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.7.5.4\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0571</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.7.5.5\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0643</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.7.5.6\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0313</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_r ltx_border_tt\" id=\"S2.T2.6.2.2.7.5.7\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0513</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_tt\" id=\"S2.T2.6.2.2.7.5.8\" style=\"padding-top:1.5pt;padding-bottom:1.5pt;\">0.0538</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 44 |
+
"capture": "Table 2: Absolute error experiment results. The absolute error() and standard deviation() of each sequence is written in bold."
|
| 45 |
+
}
|
| 46 |
+
},
|
| 47 |
+
"image_paths": {
|
| 48 |
+
"1": {
|
| 49 |
+
"figure_path": "2408.09727v1_figure_1.png",
|
| 50 |
+
"caption": "Figure 1: An overview of the absolute and relative error metrics. Absolute error is derived from the distance between N\ud835\udc41Nitalic_N pairs of corresponding target pose and ground truth pose. Relative error is derived from the distance between every two target poses.",
|
| 51 |
+
"url": "http://arxiv.org/html/2408.09727v1/x1.png"
|
| 52 |
+
},
|
| 53 |
+
"2": {
|
| 54 |
+
"figure_path": "2408.09727v1_figure_2.png",
|
| 55 |
+
"caption": "Figure 2: Pipeline of the algorithm. The target generation(green) process can be performed with any LiDAR based SLAM algorithms. Using the target pointclouds and GPS poses, target poses on the map are estimated (blue). Map accuracy is evaluated using the GPS target pose and estimated target pose (red).",
|
| 56 |
+
"url": "http://arxiv.org/html/2408.09727v1/x2.png"
|
| 57 |
+
},
|
| 58 |
+
"3(a)": {
|
| 59 |
+
"figure_path": "2408.09727v1_figure_3(a).png",
|
| 60 |
+
"caption": "(a) Target example 1\nFigure 3: LABEL:sub@fig:target1 and LABEL:sub@fig:target2 shows the target arrangement example. Due to the holes in the target, it is robust to wind in outdoor environments while remaining detectable by LiDAR.",
|
| 61 |
+
"url": "http://arxiv.org/html/2408.09727v1/x3.png"
|
| 62 |
+
},
|
| 63 |
+
"3(b)": {
|
| 64 |
+
"figure_path": "2408.09727v1_figure_3(b).png",
|
| 65 |
+
"caption": "(b) Target example 2\nFigure 3: LABEL:sub@fig:target1 and LABEL:sub@fig:target2 shows the target arrangement example. Due to the holes in the target, it is robust to wind in outdoor environments while remaining detectable by LiDAR.",
|
| 66 |
+
"url": "http://arxiv.org/html/2408.09727v1/extracted/5799262/figs/target2_rev4.jpg"
|
| 67 |
+
},
|
| 68 |
+
"4(a)": {
|
| 69 |
+
"figure_path": "2408.09727v1_figure_4(a).png",
|
| 70 |
+
"caption": "(a) Full Pointcloud\nFigure 4: Visualization of the cropping process. LABEL:sub@fig:sub2 is cropped automatically from LABEL:sub@fig:sub1 based on the GPS target pose. Ground points and outliers are manually removed from LABEL:sub@fig:sub2 to obtain LABEL:sub@fig:sub3. All black boxes indicate the same area that contains a single target.",
|
| 71 |
+
"url": "http://arxiv.org/html/2408.09727v1/extracted/5799262/figs/full_pcl.jpg"
|
| 72 |
+
},
|
| 73 |
+
"5(a)": {
|
| 74 |
+
"figure_path": "2408.09727v1_figure_5(a).png",
|
| 75 |
+
"caption": "(a) CAD modeling\nFigure 5: Example of target plane estimation in step by step. The red and blue points in LABEL:sub@fig:kmean and LABEL:sub@fig:ransac indicate the identified target planes respectively.",
|
| 76 |
+
"url": "http://arxiv.org/html/2408.09727v1/extracted/5799262/figs/target_cad_rev.png"
|
| 77 |
+
},
|
| 78 |
+
"5(b)": {
|
| 79 |
+
"figure_path": "2408.09727v1_figure_5(b).png",
|
| 80 |
+
"caption": "(b) K-means Clustering\nFigure 5: Example of target plane estimation in step by step. The red and blue points in LABEL:sub@fig:kmean and LABEL:sub@fig:ransac indicate the identified target planes respectively.",
|
| 81 |
+
"url": "http://arxiv.org/html/2408.09727v1/x4.png"
|
| 82 |
+
},
|
| 83 |
+
"5(c)": {
|
| 84 |
+
"figure_path": "2408.09727v1_figure_5(c).png",
|
| 85 |
+
"caption": "(c) RANSAC\nFigure 5: Example of target plane estimation in step by step. The red and blue points in LABEL:sub@fig:kmean and LABEL:sub@fig:ransac indicate the identified target planes respectively.",
|
| 86 |
+
"url": "http://arxiv.org/html/2408.09727v1/x5.png"
|
| 87 |
+
},
|
| 88 |
+
"5(d)": {
|
| 89 |
+
"figure_path": "2408.09727v1_figure_5(d).png",
|
| 90 |
+
"caption": "(d) SVD\nFigure 5: Example of target plane estimation in step by step. The red and blue points in LABEL:sub@fig:kmean and LABEL:sub@fig:ransac indicate the identified target planes respectively.",
|
| 91 |
+
"url": "http://arxiv.org/html/2408.09727v1/x6.png"
|
| 92 |
+
},
|
| 93 |
+
"6": {
|
| 94 |
+
"figure_path": "2408.09727v1_figure_6.png",
|
| 95 |
+
"caption": "Figure 6: Satellite view example of the experimental environment and target positions.",
|
| 96 |
+
"url": "http://arxiv.org/html/2408.09727v1/x7.png"
|
| 97 |
+
}
|
| 98 |
+
},
|
| 99 |
+
"validation": true,
|
| 100 |
+
"references": [],
|
| 101 |
+
"url": "http://arxiv.org/html/2408.09727v1"
|
| 102 |
+
}
|
20240819/2408.09735v1.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20240819/2408.09756v1.json
ADDED
|
@@ -0,0 +1,249 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Parallel-in-Time Solutions with Random Projection Neural Networks",
|
| 3 |
+
"abstract": "This paper considers one of the fundamental parallel-in-time methods for the solution of ordinary differential equations, Parareal, and extends it by adopting a neural network as a coarse propagator. We provide a theoretical analysis of the convergence properties of the proposed algorithm and show its effectiveness for several examples, including Lorenz and Burgers\u2019 equations. In our numerical simulations, we further specialize the underpinning neural architecture to Random Projection Neural Networks (RPNNs), a layer neural network where the first layer weights are drawn at random rather than optimized. This restriction substantially increases the efficiency of fitting RPNN\u2019s weights in comparison to a standard feedforward network without negatively impacting the accuracy, as demonstrated in the SIR system example.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "In this paper, we consider initial value problems expressed as a system\nof first-order ordinary differential equations (ODEs). This wide class of problems arises in many social and natural sciences applications, including semi-discretized, time-dependent partial differential equations. We express a generic system of such differential equations as\nwhich will be our reference problem. Here, \u2032 denotes the derivative with respect to the time variable. To guarantee the existence and uniqueness of its solutions, we assume that is a Lipschitz-continuous vector field and for some . Solving an initial value problem like (1 ###reference_###) analytically is generally not a possibility, and hence one needs to rely on numerical approximations to the solution curve . Numerical techniques rely on introducing a time discretization of the interval , with steps , and computing approximations of the solution at the nodes , i.e., . A popular and established option is provided by one-step methods, such as Runge\u2013Kutta schemes, which relate to in terms of a map of the form . Collocation methods are a subset of Runge\u2013Kutta methods [1 ###reference_b1###, Section II.7] with particular relevance to this paper. These methods aim to approximate the solution on each interval with a real polynomial of a sufficiently high degree and coefficients in . The updated solution is then computed as evaluating the polynomial at as . To determine the coefficients of the polynomial , one needs to solve the system of algebraic equations for a set of collocation points .\nAs initial value problems define causal processes, many time-stepping schemes are sequential by nature, in the sense that to compute , one has to compute first. Nonetheless, multiple successful approaches such as Parareal [2 ###reference_b2###], PFASST [3 ###reference_b3###], PARAEXP [4 ###reference_b4###], and MGRIT [5 ###reference_b5###] have introduced some notion of parallel-in-time solution of initial value problems (1 ###reference_###), see for instance [6 ###reference_b6###] for an overview of existing methods.\nIn this work, we build upon the Parareal algorithm [2 ###reference_b2###]. The speedup in Parareal is achieved by coupling a fine time step integrator with a coarse step integrator. In each iteration, the coarse integrator updates the initial conditions of initial value problems on time subintervals, which can be solved in parallel and only entail fine step time integration over a short time. The elegance and strong theoretical grounding of the idea (see [7 ###reference_b7###, 8 ###reference_b8###], for instance) led to a number of variants of the Parareal algorithm, including combinations of Parareal with neural networks [9 ###reference_b9###, 10 ###reference_b10###, 11 ###reference_b11###].\nIn recent years, solving differential equations with machine learning approaches gained in popularity; see, for instance, [12 ###reference_b12###] for a review. For learned methods to become staple solvers, understanding their properties and ensuring they reproduce the qualitative behavior of the solutions is paramount.\nThe problem of convergence and generalization for neural network-based PDE solvers has been considered in [13 ###reference_b13###, 14 ###reference_b14###, 15 ###reference_b15###], for instance. An analysis of the approximation properties of neural networks in the context of PDE solutions is provided in [16 ###reference_b16###, 17 ###reference_b17###]. 
In the context of ODEs, there is an increasing interest in developing deep neural networks to learn time-stepping schemes unrestricted by constraints of the local Taylor series, including approaches based on flow maps [18 ###reference_b18###], model order reduction [19 ###reference_b19###], and spectral methods [20 ###reference_b20###].\nIn the context of combining Parareal with neural networks, Parareal with a physics-informed neural network as a coarse propagator was suggested in [10 ###reference_b10###]. In [9 ###reference_b9###], the authors introduced a parallel (deep) neural network based on parallelizing the forward propagation following similar principles to those behind Parareal. In [11 ###reference_b11###], it was proposed to learn a coarse propagator by parameterizing its stability function and optimizing the associated weights to minimize an analytic convergence factor of the Parareal method for parabolic PDEs.\nNeural networks are generally considered as a composition of parametric maps whose weights are all optimized so that a task of interest is solved with sufficient accuracy. The common choice of the optimization procedure is gradient-based algorithms, which start from a random set of initial weights and update them iteratively until the stopping criterion has been reached. A class of neural networks where some of the weights are not updated at all is often called Random Projection Neural Networks (RPNNs), sometimes also called Extreme Learning Machines (ELMs) [21 ###reference_b21###, 22 ###reference_b22###, 23 ###reference_b23###]. Despite their seemingly reduced capability of approximating functions, these neural networks retain most of the approximation results of more conventional neural networks. For example, as derived in [21 ###reference_b21###, Theorem 2.1], RPNNs with two layers and hidden neurons, where only the last layer is optimized while all other weights are independently sampled from any interval according to any continuous probability distribution, can interpolate with probability one any set of distinct input-to-output pairs. Their expressivity properties, see e.g. [24 ###reference_b24###, 23 ###reference_b23###, 25 ###reference_b25###], make them suitable for the approximation of solutions of ODEs which were successfully considered in [26 ###reference_b26###, 27 ###reference_b27###, 28 ###reference_b28###, 29 ###reference_b29###, 30 ###reference_b30###], yielding accurate approximations in a fraction of the training time when compared to more conventional networks."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "1.1",
|
| 13 |
+
"parent_section_id": "1",
|
| 14 |
+
"section_name": "Contributions",
|
| 15 |
+
"text": "In this work, we build a hybrid numerical method based on the Parareal framework, where a RPNN constitutes the coarse time stepping scheme. We first derive an a-posteriori error estimate for general neural network-based solvers of ODEs. This theoretical result allows us to replace the coarse integrator of the Parareal method with a RPNN while preserving its convergence guarantees. The RPNNs are trained online during the Parareal iterations. There are several benefits to the proposed procedure. First, our hybrid approach comes with theoretical guarantees and allows us to solve a differential equation such that the produced solution is accurate to a certain degree. Additionally, using RPNNs rather than a more conventional neural network leads to a significant speedup in the algorithm without sacrificing its capabilities. Indeed, as we show for the SIR problem, using RPNNs leads to about half of the computational time of the other method, even without accounting for the offline training phase of the more conventional network. Further, we demonstrate the effectiveness of the proposed approach, together with the timings of the components of the algorithm, and apply it to several examples in Section 6 ###reference_###."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "1.2",
|
| 19 |
+
"parent_section_id": "1",
|
| 20 |
+
"section_name": "Outline",
|
| 21 |
+
"text": "The outline of the paper is as follows. We start with introducing the Parareal algorithm and its convergence properties in Section 2 ###reference_###. Section 3 ###reference_### presents the theoretical derivation of an a-posteriori error estimate for neural network-based solvers. This result relies on a non-linear variation of the constants formula, also called the Gr\u00f6bner-Alekseev Theorem. In Section 4 ###reference_###, we propose a hybrid algorithm combining the Parareal framework with the RPNN-based coarse propagator. We study the convergence properties of this hybrid algorithm in Section 5 ###reference_###. The effectiveness of the proposed method is tested in Section 6 ###reference_### on the benchmark dynamical systems studied in [7 ###reference_b7###] with the addition of the SIR and ROBER problems. We conclude with the summary and analysis of the obtained results in Section 7 ###reference_###."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "Parareal method",
|
| 27 |
+
"text": "This section introduces the Parareal algorithm [2 ###reference_b2###] and presents a convergence result needed for our derivations."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.1",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "The method",
|
| 33 |
+
"text": "The Parareal algorithm builds on two one-step methods that we call , denoting the fine and coarse integrators with timestep , respectively. There are multiple options to design such maps, one being to use the same one-step method but with finer or coarser timesteps, e.g.,\nThis strategy motivates the subscripts of the two maps since these methods rely on a fine and a coarse mesh. Another option to define and is to use methods of different orders, hence different levels of accuracy with the same timestep . Regardless of how we define these two methods, the map is more expensive to evaluate than . The goal of the Parareal algorithm is to get an approximate solution over the mesh , , with the same degree of accuracy as the one obtained with but in a shorter time. This is achieved by transforming (1 ###reference_###) into a collection of initial value problems on a shorter time interval by using . This zeroth iterate of the method consists of finding intermediate initial conditions by integrating (1 ###reference_###) with to get\nand define the initial value problems on the subintervals\nThese problems can now be solved in parallel using the fine integrator , which constitutes the parallel step in all successive Parareal iterates. A predictor-corrector scheme is used to iteratively update the initial conditions on the subintervals . Parareal iteration reads\nA common choice of a stopping criterion is for some tolerance . The parallel speedup is achieved if this criterion is met with far less iterates than the number of time intervals ."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "2.2",
|
| 37 |
+
"parent_section_id": "2",
|
| 38 |
+
"section_name": "Interpretation of the correction term",
|
| 39 |
+
"text": "Following [7 ###reference_b7###], we provide the interpretation of (3 ###reference_###) as an approximation of the Newton step for matching the exact flow at the time discretization points . We consider\nwhere with is the exact solution of the initial value problem\nLinearizing at the th iterate, , equating it to 0 and solving for the st iterate, , we arrive at the Newton update\nfor the solution of the system . The idea behind Parareal is then to approximate the unknown with , and the first order term with\nwhich yields (3 ###reference_###)."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "2.3",
|
| 43 |
+
"parent_section_id": "2",
|
| 44 |
+
"section_name": "Convergence",
|
| 45 |
+
"text": "Convergence of the Parareal iterations was proven in [7 ###reference_b7###] under the assumption that the fine integrator and the exact flow map coincide.\nLet us consider the initial value problem (1 ###reference_###) and partition the time interval into intervals of size using a grid of nodes . Assume that the fine integrator coincides with the exact flow map , i.e. . Furthermore, suppose that there exist , a set of continuously differentiable functions , and such that\nfor every , and also that there exists such that\nfor every . Then there exists a positive constant such that, at the th iterate of the Parareal method, the following bound holds\nThis result guarantees that as the iteration progresses, the method provides an increasingly accurate solution. Furthermore, when , the last product on the right-hand side vanishes, which corresponds to the worst-case scenario of the sequential solution, a.k.a. at the th iterate, the above idealized Parareal method replicates the analytical solution for the time subintervals up to .\nWe take advantage of this convergence result in Section 4 ###reference_###, constructing the coarse propagator as a neural network satisfying the assumptions of Theorem 1 ###reference_orem1###."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "A-posteriori error estimate for neural network-based solvers",
|
| 51 |
+
"text": "We aim to design a hybrid parallel-in-time solver for (1 ###reference_###) based on the Parareal algorithm. This procedure consists of the Parareal iteration where the coarse propagator is replaced by a neural network. In Section 4 ###reference_###, we will focus on a particular class of neural networks, called Random Projection Neural Networks (RPNNs). For now, however, we do not specify the structure of the neural network and define it as a map , parametrized by weights , and satisfying the initial condition of the ODE, .\nIn the classical Parareal iteration, the coarse propagator is a map satisfying , where solves (1 ###reference_###).\nThe coarse propagator balances the cost versus accuracy of the approximation, with the sweet spot yielding optimal parallel speedup. With this in mind, we design our replacement to be a continuous function of time and to allow longer steps than commonly taken by single-step numerical methods as employed by . Motivated by collocation methods [1 ###reference_b1###, Chapter II.7], we choose the weights of the neural network so that it satisfies the differential equation (1 ###reference_###) at some collocation points in the interval . More explicitly, given a set , we look for a set of weights minimizing the loss function\nConsistent with our convention, in (6 ###reference_###) \u2032 denotes the time derivative, i.e., the derivative with respect to the first component.\nIn the following, we propose an error analysis for the approximate solution . This error analysis allows us to provide a-posteriori theoretical guarantees on both, the accuracy of the network as a continuous approximation of the solution, as well as its potential as a replacement of while keeping intact the convergence guarantees of Parareal. We focus on a practical error estimate based on quadrature rules. For an, albeit less practical, alternative estimate based on defect control see Section A ###reference_### of the supplementary material.\nAssume that the collocation points , with , define a Lagrange quadrature rule exact up to order for some given , i.e., there is a set of weights for which\nwhere is the set of real polynomials of degree .\nFor a set of collocation points satisfying Assumption 1 ###reference_umption1### and any scalar times continuously differentiable function , it holds [31 ###reference_b31###, Chapter 9]\nwhere is the derivative of of order .\nWe can now formulate a quadrature-based a-posteriori error estimate for the continuous approximation that only requires the defect to be sufficiently small at the collocation points.\nLet be the solution of the initial value problem (1 ###reference_###) with . Suppose that Assumption 1 ###reference_umption1### on the collocation points is satisfied and assume that is smooth and satisfies the collocation conditions up to some error of magnitude , i.e.\nThen, there exist two constants such that, for all ,\nThe proof of Theorem 2 ###reference_orem2### is based on the Gr\u00f6bner-Alekseev formula [1 ###reference_b1###, Theorem 14.5] that we now state for completeness.\nFor and consider the solutions and of the two ODEs\nassuming they both have a unique solution. For any times , let be the exact solution of the initial value problem\nThen, for any , one has\nWe now prove the a-posteriori error estimate in Theorem 2 ###reference_orem2### using Theorem 3 ###reference_orem3###.\nLet be the solution of the initial value problem (1 ###reference_###). 
Further note that satisfies the initial value problem\nSetting , from (11 ###reference_###) we obtain\nwhere bounds the norm of the Jacobian matrix of for by virtue of .\nApproximating the integral with the quadrature and subsequently bounding the residual at the collocation points, we obtain\nwhere and\nthe right-hand side of (8 ###reference_###). To conclude the proof we set , .\n\u220e\nWhile for the proof it suffices that is finite, more practical bounds based on the one-sided Lipschitz constant of the vector field can be obtained. We derive such a bound in Section B ###reference_### of the supplementary material.\nGiven , Theorem 2 ###reference_orem2### implies that the approximation provided by the neural network is as accurate as the one provided by a th order one-step method with step size . This result allows us to replace the coarse integrator with a neural network-based solver maintaining the convergence properties of Parareal."
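The displayed equation of the Gröbner–Alekseev theorem is lost in this extraction; in its classical form (cf. [1, Theorem 14.5], with notation adapted here to the autonomous setting, so the exact symbols are an assumption), it reads:

```latex
% Gröbner–Alekseev (nonlinear variation of constants) formula, classical form.
% y' = f(y), z' = g(z), y(t_0) = z(t_0) = y_0; \Phi(t, s, w) denotes the
% solution at time t of v' = f(v) with v(s) = w.
y(t) = z(t) + \int_{t_0}^{t}
    \frac{\partial \Phi}{\partial w}\bigl(t, s, z(s)\bigr)
    \bigl[f(z(s)) - g(z(s))\bigr]\, \mathrm{d}s .
```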
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "Parareal method based on Random Projection Neural Networks",
|
| 57 |
+
"text": "The theoretical results presented in Section 3 ###reference_### hold for generic continuous approximate solutions, particularly those provided by any neural network . We now restrict the neural architecture to Random Projection Neural Networks (RPNNs) which allow a more efficient, hence faster, solution of the optimization problem (6 ###reference_###) as we will highlight in Section 6 ###reference_###."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.1",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "Architecture design",
|
| 63 |
+
"text": "RPNNs are feedforward neural networks composed of two layers, with trainable parameters confined to the outermost layer.\nWe draw the weights of the first layer from the continuous uniform distribution , for a lower bound and an upper bound which are set to and , respectively, in all our experiments. We then aim to approximate the solution of (1 ###reference_###) at a time with the parametric function\nby training the weights collected in the matrix . Here, , , is a given set of basis functions with ,\nand a smooth activation function. In the numerical experiments, we always choose . The architecture in (12 ###reference_###) satisfies the initial condition of (1 ###reference_###), i.e., .\nIn addition, and have the same regularity."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.2",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "Algorithm design",
|
| 69 |
+
"text": "Our method closely mimics the Parareal algorithm, but with the network (12 ###reference_###) deployed as a coarse propagator in the Parareal update (3 ###reference_###). While in the classical Parareal algorithm the coarse propagator is assumed to be known for all sub-intervals and Parareal iterations, we do not make this assumption in our approach. Instead, we determine individual weights for the update of each of the initial conditions featuring in the update (3 ###reference_###) to allow for a better adaptation to the local behavior of the approximated solution. Furthermore, our neural coarse integrator is not known ahead of time but is recovered and changing in the course of the Parareal iteration. Learning the coarse integrator involves training a RPNN on each of the sub-intervals to solve the ODE (1 ###reference_###) at a set of fixed collocation points in the sub-interval. This procedure would be prohibitively expensive for generic neural networks trained with gradient-based methods. However, for RPNNs, estimating the matrix in (12 ###reference_###) is considerably cheaper and comparable with classical collocation approaches, striking a balance between the computational efficiency, desirable behavior, and flexibility of the integrator. Finally, in Section 5 ###reference_### we demonstrate that our approach is provably convergent."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.3",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "Training strategy",
|
| 75 |
+
"text": "The neural coarse propagator for solution (1 ###reference_###) on the time interval is obtained by splitting the interval into sub-intervals, , , and training individual RPNNs in sequence. On the th sub-interval we train a RPNN of the form (12 ###reference_###) to solve the ODE system (1 ###reference_###) approximately on this sub-interval. The initial condition at time is obtained by the evaluation of the previous Parareal correction step. Since the vector field in (1 ###reference_###) does not explicitly depend on the time variable, we can restrict our presentation to a solution on a sub-interval\nwhere the superscript refers to th Parareal iterate.\nTo train a RPNN (12 ###reference_###) on the sub-interval , we introduce collocation points , where the subscript keeps track of the interval length emphasizing the independent choice of collocation points on each sub-interval. For a given initial condition , we find a matrix such that approximately satisfies (13 ###reference_###) for all , , by solving the optimization problem\nThis hybrid Parareal method returns approximations of the solution at the time nodes , which we call . Furthermore, since the RPNNs on sub-intervals are smooth functions of time, one could also access a piecewise smooth approximation of the curve by evaluating the individual RPNNs upon convergence of the Parareal iteration\nHere, and are the weight matrix and the initial condition at the time in the final Parareal iteration. Note that even though the points are updated in each Parareal iteration (3 ###reference_###), they do not tend to change drastically, and we can initialize in (14 ###reference_###) with the previous iterate to speedup convergence. We terminate the Parareal iteration when either the maximum number of iterations is reached, or the difference between two consecutive iterates satisfies a given tolerance."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.4",
|
| 79 |
+
"parent_section_id": "4",
|
| 80 |
+
"section_name": "Implementation details",
|
| 81 |
+
"text": "Our hybrid Parareal method is described in Algorithm 1 ###reference_### and the Python code can be found in the associated GitHub repository111https://github.com/davidemurari/RPNNHybridParareal ###reference_Parareal###. The zeroth iterate of the method, starting in line 3 ###reference_3###, only relies on RPNNs to get intermediate initial conditions , . These initial conditions are then used to solve with the fine integrators the initial value problems in parallel, see line 10 ###reference_10###. These approximations are subsequently updated in the Parareal correction step of line 16 ###reference_16###.\nThe Algorithm 1 ###reference_### relies on solving a nonlinear optimization problem in lines 4 ###reference_4### and 14 ###reference_14### to update the weights .\nFor all systems studied in Section 6 ###reference_### but the Burgers\u2019 equation, we use the Levenberg\u2013Marquardt algorithm [32 ###reference_b32###, Chapter 10]. For Burgers\u2019 equation, we rely on the Trust Region Reflective algorithm [33 ###reference_b33###] to exploit the sparsity of the Jacobian matrix. The optimization algorithms are implemented with the least_squares method of scipy.optimize. In both cases, we provide an analytical expression of the Jacobian of the loss function with respect to the weight , derived in Section C ###reference_### of the supplementary material. Additionally, all the systems but the ROBER problem are solved on a uniform grid, i.e., . For the ROBER problem, we work with a non-uniform grid, refined in , to capture the spike in the solution occurring at small times.\nAs common in neural network-based approaches for solving differential equations, see, e.g. [30 ###reference_b30###], we opt for equispaced collocation points in each time interval. We also tested Lobatto quadrature points in the Lorenz example in subsection 6.3 ###reference_###. In all experiments, we set and the number of hidden neurons to match."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "5",
|
| 85 |
+
"parent_section_id": null,
|
| 86 |
+
"section_name": "Convergence of the RPNN-based Parareal method",
|
| 87 |
+
"text": "In this section, we study the convergence properties of Algorithm 1 ###reference_###. Following the Parareal analysis in Theorem 1 ###reference_orem1### we only need to consider the time interval and collocation points satisfying Assumption 1 ###reference_umption1###.\nWe write our solution ansatz, (12 ###reference_###), and its time derivative evaluated at the collocation points as the matrices\nand shorthand the evaluation of the vector field on the rows of the matrix\nwith the canonical basis of .\nWe further rewrite the ansatz as\n, where ,\nwith , , and evaluated componentwise while denotes the componentwise product. As for the experiments, we restrict ourselves to the case \nfor which one can prove, see [21 ###reference_b21###, Theorem 2.1],\nthat with probability one and are invertible for drawn from any continuous probability distribution.\nFinally, in , accounts for the initial condition.\nFor the loss function\nwith in defined as in (12 ###reference_###), a smooth Lipschitz activation function, and a choice of step size\nthere exists a unique Lipschitz continuous function such that for every .\nWe remark that the loss function in (16 ###reference_###) using the Frobenius norm is a reformulation of (6 ###reference_###) in a matrix form. We now prove Theorem 4 ###reference_orem4### using a parameterized version of Banach Contraction Theorem presented in [34 ###reference_b34###, Lemma 1.9].\nThe requirement implies that the ansatz and its derivative, , satisfy the ODE (13 ###reference_###), , which can be equivalently written as . We introduce the fixed point map\nand, when not differently specified, we denote with the Lipschitz constant of a Lipschitz continuous function with respect to the norm. Since , we have\nas . Furthermore,\nfor any . Setting we conclude that is Lipschitz continuous with constant for satisfying (17 ###reference_###), as\nfor any . We note that the norm of can be used since for any pair of matrices of compatible dimensions, it holds . Furthermore, is Lipschitz continuous with Lipschitz constant given by , since\nBy [34 ###reference_b34###, Lemma 1.9], we can hence conclude that, provided satisfies (17 ###reference_###), there is a well-defined Lipschitz continuous function , with\nsuch that for every , or equivalently .\n\u220e\nConsider the initial value problem (1 ###reference_###) with a smooth Lipschitz continuous vector field. Suppose that the time interval is partitioned into intervals of size such that satisfies (17 ###reference_###) and choose the collocation points to satisfy the Assumption 1 ###reference_umption1###. Let be a smooth Lipschitz function. Then for the coarse integrator with as in Theorem 4 ###reference_orem4### and the fine integrator , there exist positive constants such that, at the th iterate of the hybrid Parareal method, the following bound holds\nThe proof is based on showing that our network satisfies assumptions (4 ###reference_###) and (5 ###reference_###) of Theorem 1 ###reference_orem1###. Theorem 4 ###reference_orem4### guarantees that, for satisfying (17 ###reference_###), is Lipschitz continuous with Lipschitz constant . Further noting that as we can write\nfor any , where , and hence condition (5 ###reference_###) is satisfied. Given that , as guaranteed by Theorem 4 ###reference_orem4###, satisfies the collocation conditions exactly, Theorem 2 ###reference_orem2### ensures that there exists such that . 
Because of the smoothness of and , one can also Taylor expand in time and guarantee the existence of continuously differentiable functions such that\nThis allows concluding that (4 ###reference_###) holds, and the hybrid Parareal satisfies (18 ###reference_###).\n\u220e\nAs for the classical Parareal method, at the th iterate, our hybrid Parareal method with the exact fine integrator replicates the analytical solution at the time instants .\nIn practice, as presented in the previous section, we do not have access to the function , but we only approximate its value at the points involved in the hybrid Parareal iterates, i.e., . Let us denote by the function approximating , so that . This function is typically provided by a convergent iterative method minimizing (16 ###reference_###). Under the smoothness assumptions of Proposition 1 ###reference_position1### and supposing the map is Lipschitz continuous, i.e., the adopted optimization method depends regularly on the parameter , the convergence in Proposition 1 ###reference_position1### also holds for the approximate case. To see this, note that condition (4 ###reference_###) also holds for the approximate case as long as is smooth enough and the collocation conditions are solved sufficiently accurately. In practice, based on (9 ###reference_###), it suffices to have\nfor an , and every and iterate . Furthermore, assumption (5 ###reference_###) follows from the Lipschitz regularity of the approximate function ."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "6",
|
| 91 |
+
"parent_section_id": null,
|
| 92 |
+
"section_name": "Numerical results",
|
| 93 |
+
"text": "This section collects several numerical tests that support our theoretical derivations. We consider six dynamical systems, four of which come from the experimental section in [7 ###reference_b7###], to which we add the SIR model and the ROBER problem. We assume that, for each of these systems, a single initial value problem is of interest and explore how RPNN-based coarse propagators perform for that initial value problem. For the one-dimensional Burgers\u2019 equation, we consider a semi-discretization with centered finite differences and provide the experimental results for different initial conditions, imposing homogeneous Dirichlet boundary conditions on the domain .\nThe chosen fine integrators are classical Runge\u2013Kutta methods with a smaller timestep than the coarse one . More explicitly, we assume that the coarse timestep is a multiple of the fine timestep and one coarse integrator step , corresponds to steps of the size of the fine integrator. In all experiments, we use equispaced time collocation points, and for the Lorenz system, we also use Lobatto points. For stiff problems such as Burgers\u2019 and ROBER\u2019s, we use the implicit Euler method (IE), with update , as a fine integrator, while for the others we found Runge-Kutta (RK4),\nwith\nto provide accurate solutions with moderately small step sizes. We specify the adopted timesteps in the dedicated sections below.\nThe purpose of this paper is to demonstrate that our hybrid Parareal method based on RPNNs is theoretically motivated and practically effective, rather than the high-performance implementation. Thus, most of our experiments are run on a single processor where the parallel speedup would result from parallel execution of the fine integrators on the sub-intervals, in proportion to the number of cores used. To demonstrate the principle in hardware we run the ROBER\u2019s problem on five processors available to us and compare to the serial application of the fine integrator, however Parareal benefits will scale up with the problem size and number of cores. For Burgers\u2019 equation, we again use five processors for convenience since this allows us to do 100 repeated experiments faster.\nIn all plots, the label \\saypara refers to the hybrid methodology with neural networks as coarse propagators, while \\sayref to the reference solution, obtained by the sequential application of the fine solver. We always plot the piecewise smooth Hybrid Parareal approximant constructed as (15 ###reference_###). We run the Hybrid Parareal until the difference between two consecutive iterates was at most . As a safeguard, we put a hard limit, , on the iteration number, which was, however, not triggered in any of our experiments. All experiments were run on a MacBook Pro 2020 with Intel i5 processor and all the computational times were averaged over 100 runs per experiment.\nFor each experiment, we report an average time per update of the coarse integrator on a sub-interval, which is also averaged over the number of sub-intervals. We measure the timing when computing the zeroth iterate in lines 3-6 of Algorithm 1 ###reference_### to isolate the effects of warm starts used in Parareal update in later iterations. We also report a total average time to compute the solution, including the above mentioned coarse integrator updates along with possibly parallel execution of the fine step integrators."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "6.1",
|
| 97 |
+
"parent_section_id": "6",
|
| 98 |
+
"section_name": "SIR",
|
| 99 |
+
"text": "The SIR model is one of the simplest systems considered in mathematical biology to describe the spread of viral infections. SIR consists of three coupled ODEs for with parameters , and :\n###figure_1### We use this example to compare two different types of coarse propagators, the RPNN-based approach with a neural operator-type flow map trained to approximate the solutions of the dynamical system described by (19 ###reference_###) for initial conditions in the compact set , and times in , see also [35 ###reference_b35###, 36 ###reference_b36###].\nGiven that the Parareal method needs to evaluate the coarse propagator on several initial conditions, the learned flow map is the most natural neural network-based alternative, while a standard Physics Informed Neural Network, which needs to be fitted for each initial condition, would be computationally too expensive. Both coarse propagators use the same coarse timestep , while the fine solver timestep is . The piecewise smooth approximations computed with both methods are plotted in Figure 1 ###reference_###. We report the corresponding timings in Table 1 ###reference_###.\nThe RPNN-based approach took an average of 0.3940 seconds to converge to the final solution over 100 repeated experiments, while the flow map approach took an average time of 0.8047 seconds. The reason behind the higher cost of the flow map approach is that RPNNs minimize the residual more accurately than the flow map approach since they are trained for the specific initial conditions of interest, leading to a faster convergence of the Parareal method. If the offline training phase is accounted for, about 20 more minutes must be considered for the flow map approach, while no offline training is required for the RPNN-based approach. The offline training cost depends on the chosen architecture and training strategy. These details are provided in Section D ###reference_### of the supplementary material.\nGiven the reported results, it is clear that while both methods are comparable in terms of accuracy, the distribution of the costs is considerably different. The flow map approach has a high training cost and a low evaluation cost but is also less accurate hence needing more Parareal iterations. On the other hand, the RPNN strategy, having no offline training phase and yielding more accurate solutions and hence needing fewer Parareal iterations, saves substantial time. For this reason, we will only focus on the RPNN-based approach in the following experiments."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "6.2",
|
| 103 |
+
"parent_section_id": "6",
|
| 104 |
+
"section_name": "ROBER",
|
| 105 |
+
"text": "The ROBER problem is a prototypical stiff system of coupled ODEs with parameters , , and ,\nAs ROBER\u2019s solution spikes for short times, the usual approach is to discretize the time non-uniformly. Therefore we choose the coarse step size to be for times in and for times in . The fine integrator timestep is . We remark that ROBER\u2019s problem is commonly solved using a variable step-size method, for example, based on an embedded Runge-Kutta method [1 ###reference_b1###, Section II.4]. Fixing the step size allows us to understand how the proposed hybrid method performs on stiff equations without extra complication of step adaptivity. A variable step Parareal method (regardless if the coarse propagator is learned or classical), would involve adaptivity in both coarse and fine step and is beyond scope of this work.\n###figure_2### We report the obtained approximate solutions in Figure 2 ###reference_### and the timings in Table 2 ###reference_###. In these experiments, the fine integrators were executed in parallel on five cores. Thus, the total average time to compute the solution reflects the parallel speed up, albeit for a small number of cores. Given this stiff problem requires an implicit fine integrator, we expect the computational costs of the update of the coarse integrator, s, and one step of the fine integrator, s, to be closer than when using an explicit scheme as it is the case in the remaining examples. Additionally, to cover one coarse step, the fine integrator needs to perform at least 100 steps, given our choices for and . These respective costs help to optimally balance the choice of the number of sub-intervals versus the number of fine steps in each sub-interval, along with practical considerations like the number of cores available."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "6.3",
|
| 109 |
+
"parent_section_id": "6",
|
| 110 |
+
"section_name": "Lorenz",
|
| 111 |
+
"text": "For weather forecasts, real-time predictions are paramount, rendering parallel-in-time solvers highly relevant in this context. Lorenz\u2019s equations\ndescribe one simple model for weather prediction.\nDifferent parameter values give rise to considerably different trajectories for this system. We set , , and to have chaotic behavior. We compute an approximate solution up to time , using RPNNs as a coarse propagator with and RK4 with step as a fine integrator.\n###figure_3### To show that the algorithm is not overly sensitive to the choice of the collocation points, we repeated the simulations using Lobatto collocation points. The qualitative behavior of the produced solutions for one choice of trained weights is reported in Figure 3 ###reference_### and the corresponding timings in Table 3 ###reference_###. Although the Lorenz system is chaotic, the proposed hybrid solver provides an accurate approximate solution on the considered interval. Additionally, the average cost of one evaluation of the coarse RPNN-based integrator does not appear to depend strongly on the system\u2019s complexity but mostly on its dimension. Indeed, the average cost of one RPNN evaluation is comparable with the one for the SIR problem, see Table 1 ###reference_###."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "6.4",
|
| 115 |
+
"parent_section_id": "6",
|
| 116 |
+
"section_name": "Arenstorf orbit",
|
| 117 |
+
"text": "The three-body problem is a well-known problem in physics that pertains to the time evolution of three bodies interacting because of their gravitational forces. Changing the ratios between the masses, their initial conditions, and velocities, can starkly alter the system\u2019s time evolution, and many configurations have been thoroughly studied. One of them is the stable Arenstorf orbit, which arises when one of the masses is negligible and the other two masses orbit in a plane. The equations of motion for this specific instance of the three-body problem are\n, , and . This configuration leads to a periodic orbit of period [1 ###reference_b1###]. In practice, we transform (22 ###reference_###) into a first order system via the velocity variables and . We include the plot of the obtained solution for time up to and timesteps , and , in Figure 4 ###reference_### and the timings in Table 4 ###reference_###.\n###figure_4### This experiment serves to illustrate the benefits of using a Parareal-like correction of the neural network-based solution. Indeed, the approximate solution for short times does not accurately follow the correct trajectory. One possible remedy would be to restrict the step size as was done for the ROBER\u2019s problem. However, even for this larger time step choice, after just one step , the Parareal correction resets the initial condition for the next interval bringing the solution back onto the stable orbit. Thus, not relying solely on a network-based solution allows us to compute an accurate solution for the later times, even though initially the solution departs the orbit."
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "6.5",
|
| 121 |
+
"parent_section_id": "6",
|
| 122 |
+
"section_name": "Viscous Burgers\u2019 equation",
|
| 123 |
+
"text": "Most of the systems considered up to now are low-dimensional. A natural way to test the method\u2019s performance on higher-dimensional systems is to work with spatial semi-discretizations of PDEs, where the mesh over which the spatial discretization is defined determines the system\u2019s dimension. We consider the one-dimensional Burgers\u2019 equation\nIn this section, we only report the results for the initial condition in Equation (23 ###reference_###), but we include results for two more choices of initial conditions in Section F ###reference_### of the supplementary material. All the experiments were run on five cores. In all tests we work with viscosity parameter , a uniform spatial grid of points in and\ncoarse and fine step sizes and , respectively. The spatial semi-discretization with centered finite differences writes\nwhere , , , , is the component-wise product, and are the centered finite difference matrices of first and second order, respectively, suitably corrected to impose the homogeneous Dirichlet boundary conditions on .\n###figure_5### We report the qualitative behavior of the solutions in Figure 5 ###reference_###. Subfigure (a) tracks the solution at ten equally spaced time instants in the interval . Subfigure (b) shows the solution surfaces obtained with the IE method on the left and the hybrid Parareal for one set of trained parameters on the right. We include the timings in Table 5 ###reference_###. We observe that the cost of the presented hybrid Parareal method grows with the dimensionality of the problem. However, we remark that for each of the components of the solution, we adopted only coefficients."
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "7",
|
| 127 |
+
"parent_section_id": null,
|
| 128 |
+
"section_name": "Conclusions and future extensions",
|
| 129 |
+
"text": "In this manuscript, we proposed a hybrid parallel-in-time algorithm to solve initial value problems using a neural network as a coarse propagator within the Parareal framework. We derived an a-posteriori error estimate for generic neural network-based approximants. Based on these theoretical results we defined a hybrid Parareal algorithm involving RPNNs as coarse propagators which inherits the theoretical guarantees of the Parareal algorithm.\nWe compared our hybrid Parareal solver based on RPNNs with one based on the flow map approach on the SIR problem. We demonstrated that our approach led to lower computational costs and no offline training phase. We reserve the judgment of flow map performance. However, we also tested it for other examples, including the Brusselator, where we noticed that the offline training phase can be very intricate because one has to first identify a forward invariant subset of .\nThe most promising extension of this work is to include a mechanism allowing for time-adaptivity in the algorithm, i.e., for coarsening or refinement of the temporal grid based on the local behavior of the solution. It would also be interesting to test our approach on higher-dimensional systems with high-performance computing hardware."
|
| 130 |
+
}
|
| 131 |
+
],
|
| 132 |
+
"appendix": [
|
| 133 |
+
{
|
| 134 |
+
"section_id": "Appendix 1",
|
| 135 |
+
"parent_section_id": null,
|
| 136 |
+
"section_name": "Appendix A A-posteriori error estimate based on the defect",
|
| 137 |
+
"text": "We now derive an alternative a-posteriori estimate for network-based approximate solutions based on defect control.\nConsider the initial value problem (1 ###reference_###), given by\nwhere is continuously differentiable and admits a unique solution. Let satisfy\nThen satisfies the linear differential equation\nwhere\nand is the Jacobian matrix of .\nTo prove the lemma, it suffices to highlight that\n\u220e\nThe solution to the linear problem (24 ###reference_###) satisfies the following bound:\nLet solve the initial value problem in (24 ###reference_###). Suppose that for . Then,\nwhere\nis the logarithmic norm of .\nFor the proof of this lemma, see [38 ###reference_b38###, Theorem 10.6].\nAs we are interested in solving (1 ###reference_###), we set and introduce the defect function\nWe remark that the definition of is of the same form as the loss (6 ###reference_###). If it was known that for a tolerance and all , then by Lemma 2 ###reference_ma2### we could conclude that\nGiven that the solution is unknown, and its logarithmic norm cannot be computed exactly. Thus, for a more practical error estimate, we introduce an assumption on the existence of a compact subset such that for . Then, we can proceed with the inequality chain as\nwhere . Note that the right-hand side of (25 ###reference_###) is nonnegative for all . In particular, (25 ###reference_###) implies that a neural network can be employed to approximate the solution of (1 ###reference_###) which is as accurate as a classical coarse solver provided the norm of the defect is sufficiently small."
|
| 138 |
+
},
|
| 139 |
+
{
|
| 140 |
+
"section_id": "Appendix 2",
|
| 141 |
+
"parent_section_id": null,
|
| 142 |
+
"section_name": "Appendix B Bound on the norm of the sensitivity matrix",
|
| 143 |
+
"text": "In this appendix, we provide a practical bound for the norm of the Jacobian of the flow map of a vector field , assumed to be continuously differentiable with respect to the initial condition. For this, we differentiate the initial value problem (1 ###reference_###), given by\nwith respect to and obtain\nwhere is the identity matrix. Equation (27 ###reference_###) is generally known as the variational equation of (26 ###reference_###). This ODE is a non-autonomous linear differential equation in the unknown matrix . In practice, (27 ###reference_###) should be solved jointly with (26 ###reference_###). However, for the purpose of bounding the Euclidean norm , it is not necessary to solve them. Following [39 ###reference_b39###, Chapter 2], we assume that for compact and all . This is not a restrictive assumption on compact time intervals given the assumed regularity for . Then, one can get\nwhere . We conclude that the constant in the proof of Theorem 2 ###reference_orem2### can be set to , with positive or negative depending on ."
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"section_id": "Appendix 3",
|
| 147 |
+
"parent_section_id": null,
|
| 148 |
+
"section_name": "Appendix C The Jacobian matrix of the loss function",
|
| 149 |
+
"text": "In this subsection, we consider the loss function (16 ###reference_###) and its gradient. Note that (16 ###reference_###) can be expressed as (6 ###reference_###) which in turn can be related to the solution of the non-linear matrix equation\nMore explicitly, we have\nTo minimize the loss function (6 ###reference_###), we need the Jacobian of the matrix-valued function . As is a matrix-valued function with matrix inputs, we rely on the vectorization operator, denoted by , using the machinery of matrix-calculus introduced, for example, in [40 ###reference_b40###]. We hence compute , given by\nwhere is the identity matrix, is the Kronecker product, and stacks the columns of the input matrix into a column vector. The Jacobian of in the last line depends on the vector field , while the other terms do not.\nMost of the dynamical systems we consider in the numerical experiments in Section 6 ###reference_### are of low dimension. For this reason, for all the cases but Burgers\u2019 equation, we assemble the Jacobian case by case, following this construction. For Burgers\u2019 equation, we instead implement it as a linear operator, specifying its action and the action of its transpose onto input vectors. For the Burgers\u2019 equation, we have\nand hence .\nThis expression implies that"
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"section_id": "Appendix 4",
|
| 153 |
+
"parent_section_id": null,
|
| 154 |
+
"section_name": "Appendix D Details on the network for the flow map approach",
|
| 155 |
+
"text": "In this section, we provide details on the network for the flow map approach required for the comparison of the training costs presented in Table 1 ###reference_###. The network used for the coarse propagator is based on the parametrization\nwhere . To train the network, implemented with PyTorch, we use the Adam optimizer for epochs, with each epoch consisting of minimizing the ODE residual over different randomly sampled collocation points ."
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"section_id": "Appendix 5",
|
| 159 |
+
"parent_section_id": null,
|
| 160 |
+
"section_name": "Appendix E Experiment for Brusselator\u2019s equation",
|
| 161 |
+
"text": "This section collects numerical experiments for the Brusselator, which is a system of two scalar differential equations modeling a chain of chemical reactions [41 ###reference_b41###]. The equations write\nwhere we choose the parameters , . In this setting, one can prove to have a limit cycle in the dynamics. We simulate this system on the time interval , with a fine timestep and a coarse one of size .\n###figure_6### We repeat the simulation 100 times, reporting the average cost of one coarse timestep in Table 6 ###reference_###, together with the average total cost of the hybrid Parareal solver. Figure 6 ###reference_### shows the approximate solution and a reference solution. We also remark that, as desired, the hybrid method recovers the limit cycle."
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"section_id": "Appendix 6",
|
| 165 |
+
"parent_section_id": null,
|
| 166 |
+
"section_name": "Appendix F Additional experiments for Burgers\u2019 equation",
|
| 167 |
+
"text": "###figure_7### ###figure_8### In this section, we report the simulation results for the Burgers\u2019 equation with two more initial conditions. The setup of the network and the partition of the time domain are the same as for the initial condition included in Section 6.5 ###reference_###. In Figure 7 ###reference_###, we work with the initial condition , while in Figure 8 ###reference_### with . The timings are included in Tables 7 ###reference_### and 8 ###reference_###, respectively. As expected, the time to obtain the full solution grows with the complexity of the initial condition. Indeed, there are about 10 seconds of difference between the fastest, corresponding to the quadratic initial condition in Figure 8 ###reference_###, to the second fastest, the one with , and the slowest in Figure 8 ###reference_###. The reason behind this observed behavior is that, for more complicated solutions, the coarse predictions need to be corrected with the Parareal correction step more often, and the optimization problems to solve to get the coarse propagator get more expensive."
|
| 168 |
+
}
|
| 169 |
+
],
|
| 170 |
+
"tables": {
|
| 171 |
+
"1": {
|
| 172 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T1\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S6.T1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S6.T1.1.2.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S6.T1.1.2.1.1\">Timing breakdown</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T1.1.2.1.2\">RPNN</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T1.1.2.1.3\">Flow</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T1.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_tt\" id=\"S6.T1.1.1.2\">Offline training phase</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S6.T1.1.1.3\">0s</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S6.T1.1.1.1\">\n20 minutes</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.3.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S6.T1.1.3.1.1\">Average cost coarse step in the zeroth iterate</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.3.1.2\">0.0009773s</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.3.1.3\">0.0002729s</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.4.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"S6.T1.1.4.2.1\">Total</th>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T1.1.4.2.2\">0.3940s</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T1.1.4.2.3\">0.8047s</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>SIR: Computational time for the RPNN and flow map based Hybrid Parareal variants on a single core.</figcaption>\n</figure>",
|
| 173 |
+
"capture": "Table 1: SIR: Computational time for the RPNN and flow map based Hybrid Parareal variants on a single core."
|
| 174 |
+
},
|
| 175 |
+
"2": {
|
| 176 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T2\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S6.T2.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S6.T2.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S6.T2.1.1.2\">Timing breakdown</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T2.1.1.3\">RPNN</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T2.1.1.1\">Sequential IE, \n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T2.1.2.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_tt\" id=\"S6.T2.1.2.1.1\">Average cost coarse step in the zeroth iterate</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S6.T2.1.2.1.2\">0.001881s</td>\n<td class=\"ltx_td ltx_border_r ltx_border_tt\" id=\"S6.T2.1.2.1.3\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T2.1.3.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"S6.T2.1.3.2.1\">Average cost to produce the solution</th>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T2.1.3.2.2\">179.8280s</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T2.1.3.2.3\">263.2613s</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>ROBER: Computational time for Hybrid Parareal using five cores versus sequential application of IE with fine step .</figcaption>\n</figure>",
|
| 177 |
+
"capture": "Table 2: ROBER: Computational time for Hybrid Parareal using five cores versus sequential application of IE with fine step ."
|
| 178 |
+
},
|
| 179 |
+
"3": {
|
| 180 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T3\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S6.T3.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S6.T3.1.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_l ltx_border_rr ltx_border_t\" id=\"S6.T3.1.1.1.1\">Timing breakdown</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T3.1.1.1.2\">Uniform</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T3.1.1.1.3\">Lobatto</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T3.1.2.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_l ltx_border_rr ltx_border_tt\" id=\"S6.T3.1.2.1.1\">Average cost coarse step in the zeroth iterate</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S6.T3.1.2.1.2\">0.0009430s</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S6.T3.1.2.1.3\">0.0009371s</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.1.3.2\">\n<td class=\"ltx_td ltx_align_left ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"S6.T3.1.3.2.1\">Average cost to produce the solution</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T3.1.3.2.2\">1.8312s</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T3.1.3.2.3\">1.8184s</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Lorenz: Computational time for the RPNN-based Hybrid Parareal with uniform and Lobatto nodes on a single core.</figcaption>\n</figure>",
|
| 181 |
+
"capture": "Table 3: Lorenz: Computational time for the RPNN-based Hybrid Parareal with uniform and Lobatto nodes on a single core."
|
| 182 |
+
},
|
| 183 |
+
"4": {
|
| 184 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T4\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S6.T4.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S6.T4.1.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_l ltx_border_rr ltx_border_t\" id=\"S6.T4.1.1.1.1\">Timing breakdown</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T4.1.1.1.2\">RPNN</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T4.1.2.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_l ltx_border_rr ltx_border_tt\" id=\"S6.T4.1.2.1.1\">Average cost coarse step in the zeroth iterate</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S6.T4.1.2.1.2\">0.001912s</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.1.3.2\">\n<td class=\"ltx_td ltx_align_left ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"S6.T4.1.3.2.1\">Average cost to produce the solution</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T4.1.3.2.2\">9.7957s</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 4: </span>Arenstorf: Computational time for Hybrid Parareal using a single core.</figcaption>\n</figure>",
|
| 185 |
+
"capture": "Table 4: Arenstorf: Computational time for Hybrid Parareal using a single core."
|
| 186 |
+
},
|
| 187 |
+
"5": {
|
| 188 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T5\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S6.T5.3\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S6.T5.3.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_l ltx_border_rr ltx_border_t\" id=\"S6.T5.3.1.1.1\">Timing breakdown</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T5.3.1.1.2\">RPNN</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T5.3.2.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_l ltx_border_rr ltx_border_tt\" id=\"S6.T5.3.2.1.1\">Average cost coarse step in the zeroth iterate</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S6.T5.3.2.1.2\">0.2098s</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T5.3.3.2\">\n<td class=\"ltx_td ltx_align_left ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"S6.T5.3.3.2.1\">Average cost to produce the solution</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T5.3.3.2.2\">29.4740s</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 5: </span>Burgers: Computational time for Hybrid Parareal using five cores, and initial condition .</figcaption>\n</figure>",
|
| 189 |
+
"capture": "Table 5: Burgers: Computational time for Hybrid Parareal using five cores, and initial condition ."
|
| 190 |
+
},
|
| 191 |
+
"6": {
|
| 192 |
+
"table_html": "<figure class=\"ltx_table\" id=\"A5.T6\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"A5.T6.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"A5.T6.1.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_l ltx_border_rr ltx_border_t\" id=\"A5.T6.1.1.1.1\">Timing breakdown</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"A5.T6.1.1.1.2\">RPNN</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"A5.T6.1.2.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_l ltx_border_rr ltx_border_tt\" id=\"A5.T6.1.2.1.1\">Average cost coarse step in the zeroth iterate</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"A5.T6.1.2.1.2\">0.001012s</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A5.T6.1.3.2\">\n<td class=\"ltx_td ltx_align_left ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"A5.T6.1.3.2.1\">Average cost to produce the solution</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"A5.T6.1.3.2.2\">0.1469s</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 6: </span>Brusselator: Computational time for Hybrid Parareal using a single core.</figcaption>\n</figure>",
|
| 193 |
+
"capture": "Table 6: Brusselator: Computational time for Hybrid Parareal using a single core."
|
| 194 |
+
},
|
| 195 |
+
"7": {
|
| 196 |
+
"table_html": "<figure class=\"ltx_table\" id=\"A6.T7\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"A6.T7.3\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"A6.T7.3.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_l ltx_border_rr ltx_border_t\" id=\"A6.T7.3.1.1.1\">Timing breakdown</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"A6.T7.3.1.1.2\">RPNN</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"A6.T7.3.2.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_l ltx_border_rr ltx_border_tt\" id=\"A6.T7.3.2.1.1\">Average cost coarse step in the zeroth iterate</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"A6.T7.3.2.1.2\">0.1695s</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A6.T7.3.3.2\">\n<td class=\"ltx_td ltx_align_left ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"A6.T7.3.3.2.1\">Average cost to produce the solution</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"A6.T7.3.3.2.2\">17.7069s</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 7: </span>Burgers: Computational time for Hybrid Parareal using five cores, and initial condition .</figcaption>\n</figure>",
|
| 197 |
+
"capture": "Table 7: Burgers: Computational time for Hybrid Parareal using five cores, and initial condition ."
|
| 198 |
+
},
|
| 199 |
+
"8": {
|
| 200 |
+
"table_html": "<figure class=\"ltx_table\" id=\"A6.T8\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"A6.T8.3\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"A6.T8.3.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_l ltx_border_rr ltx_border_t\" id=\"A6.T8.3.1.1.1\">Timing breakdown</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"A6.T8.3.1.1.2\">RPNN</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"A6.T8.3.2.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_l ltx_border_rr ltx_border_tt\" id=\"A6.T8.3.2.1.1\">Average cost coarse step in the zeroth iterate</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"A6.T8.3.2.1.2\">0.3356s</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A6.T8.3.3.2\">\n<td class=\"ltx_td ltx_align_left ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"A6.T8.3.3.2.1\">Average cost to produce the solution</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"A6.T8.3.3.2.2\">45.2056s</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 8: </span>Burgers: Computational time for Hybrid Parareal using five cores, and initial condition .</figcaption>\n</figure>",
|
| 201 |
+
"capture": "Table 8: Burgers: Computational time for Hybrid Parareal using five cores, and initial condition ."
|
| 202 |
+
}
|
| 203 |
+
},
|
| 204 |
+
"image_paths": {
|
| 205 |
+
"1": {
|
| 206 |
+
"figure_path": "2408.09756v1_figure_1.png",
|
| 207 |
+
"caption": "Figure 1: SIR: Hybrid Parareal solution with (left) a RPNN-based coarse propagator, (right) flow map coarse propagator.",
|
| 208 |
+
"url": "http://arxiv.org/html/2408.09756v1/x1.png"
|
| 209 |
+
},
|
| 210 |
+
"2": {
|
| 211 |
+
"figure_path": "2408.09756v1_figure_2.png",
|
| 212 |
+
"caption": "Figure 2: ROBER: Components of the\nHybrid Parareal solution. To plot all components on the same scale, \ud835\udc312subscript\ud835\udc312\\mathbf{x}_{2}bold_x start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT was scaled by a factor of 104superscript10410^{4}10 start_POSTSUPERSCRIPT 4 end_POSTSUPERSCRIPT.",
|
| 213 |
+
"url": "http://arxiv.org/html/2408.09756v1/x2.png"
|
| 214 |
+
},
|
| 215 |
+
"3": {
|
| 216 |
+
"figure_path": "2408.09756v1_figure_3.png",
|
| 217 |
+
"caption": "Figure 3: Lorenz: Hybrid Parareal solution with (left) uniform collocation points, (right) Lobatto collocation points.",
|
| 218 |
+
"url": "http://arxiv.org/html/2408.09756v1/x3.png"
|
| 219 |
+
},
|
| 220 |
+
"4": {
|
| 221 |
+
"figure_path": "2408.09756v1_figure_4.png",
|
| 222 |
+
"caption": "Figure 4: Arenstorf: Components of the\nHybrid Parareal solution (left), and the orbit of the initial condition (right).",
|
| 223 |
+
"url": "http://arxiv.org/html/2408.09756v1/x4.png"
|
| 224 |
+
},
|
| 225 |
+
"5": {
|
| 226 |
+
"figure_path": "2408.09756v1_figure_5.png",
|
| 227 |
+
"caption": "Figure 5: Burgers: Snapshots of the solution obtained with Hybrid Parareal (left), comparison of the solution surfaces between Hybrid Parareal and the fine integrator applied serially (right). Solution corresponding to u0\u2062(x)=sin\u2061(2\u2062\u03c0\u2062x)subscript\ud835\udc620\ud835\udc652\ud835\udf0b\ud835\udc65u_{0}(x)=\\sin{(2\\pi x)}italic_u start_POSTSUBSCRIPT 0 end_POSTSUBSCRIPT ( italic_x ) = roman_sin ( 2 italic_\u03c0 italic_x ).",
|
| 228 |
+
"url": "http://arxiv.org/html/2408.09756v1/x5.png"
|
| 229 |
+
},
|
| 230 |
+
"6": {
|
| 231 |
+
"figure_path": "2408.09756v1_figure_6.png",
|
| 232 |
+
"caption": "Figure 6: Qualitative accuracy of the predicted solutions using Parareal with RPNN.",
|
| 233 |
+
"url": "http://arxiv.org/html/2408.09756v1/x6.png"
|
| 234 |
+
},
|
| 235 |
+
"7": {
|
| 236 |
+
"figure_path": "2408.09756v1_figure_7.png",
|
| 237 |
+
"caption": "Figure 7: Burgers: Snapshots of the solution obtained with Hybrid Parareal (left), comparison of the solution surfaces between Hybrid Parareal and the fine integrator applied serially (right). Solution corresponding to u0\u2062(x)=x\u2062(1\u2212x)subscript\ud835\udc620\ud835\udc65\ud835\udc651\ud835\udc65u_{0}(x)=x(1-x)italic_u start_POSTSUBSCRIPT 0 end_POSTSUBSCRIPT ( italic_x ) = italic_x ( 1 - italic_x ).",
|
| 238 |
+
"url": "http://arxiv.org/html/2408.09756v1/x7.png"
|
| 239 |
+
},
|
| 240 |
+
"8": {
|
| 241 |
+
"figure_path": "2408.09756v1_figure_8.png",
|
| 242 |
+
"caption": "Figure 8: Burgers: Snapshots of the solution obtained with Hybrid Parareal (left), comparison of the solution surfaces between Hybrid Parareal and the fine integrator applied serially (right). Solution corresponding to u0\u2062(x)=sin\u2061(2\u2062\u03c0\u2062x)+cos\u2061(4\u2062\u03c0\u2062x)\u2212cos\u2061(8\u2062\u03c0\u2062x)subscript\ud835\udc620\ud835\udc652\ud835\udf0b\ud835\udc654\ud835\udf0b\ud835\udc658\ud835\udf0b\ud835\udc65u_{0}(x)=\\sin{(2\\pi x)}+\\cos{(4\\pi x)}-\\cos{(8\\pi x)}italic_u start_POSTSUBSCRIPT 0 end_POSTSUBSCRIPT ( italic_x ) = roman_sin ( 2 italic_\u03c0 italic_x ) + roman_cos ( 4 italic_\u03c0 italic_x ) - roman_cos ( 8 italic_\u03c0 italic_x ).",
|
| 243 |
+
"url": "http://arxiv.org/html/2408.09756v1/x8.png"
|
| 244 |
+
}
|
| 245 |
+
},
|
| 246 |
+
"validation": true,
|
| 247 |
+
"references": [],
|
| 248 |
+
"url": "http://arxiv.org/html/2408.09756v1"
|
| 249 |
+
}
|
20240819/2408.09765v1.json
ADDED
|
@@ -0,0 +1,571 @@
| 1 |
+
{
|
| 2 |
+
"title": "Baby Bear: Seeking a Just Right Rating Scale for Scalar Annotations",
|
| 3 |
+
"abstract": "Our goal is to identify a mechanism for efficiently assigning scalar ratings to each of a large set of elements. For example, \u201cwhat percent positive or negative is this product review?\u201d When sample sizes are small, prior work has advocated for methods such as Best Worst Scaling (BWS) as being more robust than direct ordinal annotation (\u201dLikert scales\u201d). Here we first introduce IBWS, which iteratively collects annotations through Best-Worst Scaling, resulting in robustly ranked crowd-sourced data. While effective, IBWS is too expensive for large-scale tasks. Using the results of IBWS as a best-desired outcome, we evaluate various direct assessment methods to determine which are both cost-efficient and best correlates to a large scale BWS annotation strategy.\nFinally, we illustrate in the domains of dialogue and sentiment analysis how these annotations can drive robust learning-to-rank models for automated assessment.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Human annotations are crucial for improving model performance. With the rise of large language models (LLMs), the demand for large-scale human annotations has grown, particularly for pre-training, supervised fine-tuning (SFT) and incorporating human feedback in the rewards function (RLHF) (Devlin et al. 2019a ###reference_b7###; Chen et al. 2024 ###reference_b6###; Liang et al. 2024 ###reference_b21###). However, gathering reliable human annotations at scale is both expensive and time-consuming, making it crucial to develop strategies that can reduce these costs while ensuring the data\u2019s reliability. Additionally, many machine learning tasks\u2014such as web search, computer vision, recommender systems, dialogue systems, and machine translation\u2014rely on models that can effectively rank items or responses (Liu et al. 2009 ###reference_b23###; Weston, Bengio, and Usunier 2010 ###reference_b44###). Learning-to-rank (LTR) models, in particular, require training data with accurate rankings of large item sets, which can be challenging to obtain. To address this challenge, recent progress generally falls along two lines: optimizing annotation protocols or improving LTR models.\nUnder the first taxonomy, many efforts have been made to develop more effective annotation protocols that either produce higher-quality annotations or minimize the number of human annotations required (Sakaguchi and Van Durme 2018 ###reference_b35###; Mohankumar and Khapra 2022 ###reference_b28###; Mishra et al. 2022 ###reference_b27###; Lee et al. 2023 ###reference_b19###).\nHowever, this paradigm often fails to consider the connection between the collected annotations and subsequent learning-to-rank processes. For instance, Best Worst Scaling (BWS) can generate relative rankings within a small set of items: humans incrementally pick the best and worst in a small set, under some category. Yet BWS is expensive if seeking a global ranking across a large set of items.\nOn the other hand, model-in-the-loop ranking focuses on enhancing the model\u2019s ranking ability by redesigning its structure but can overlook the quality of the annotations (Xia et al. 2008 ###reference_b45###; Liu et al. 2009 ###reference_b23###; Shah and Wainwright 2016 ###reference_b38###). Our goal is to bridge this gap by identifying an annotation protocol that not only efficiently produces robust ranked annotations but can also be used to train an LTR model to predict rankings.\nMotivated by the fact that BWS is more effective than direct ordinal annotations (Louviere, Flynn, and Marley 1987 ###reference_b25###), in this study, we first introduce IBWS (Iterated Best-Worst Scaling), a novel ranking algorithm designed to generate reliable annotations by iteratively refining feedback from BWS. Although we show that IBWS is effective, its complexity makes it challenging for large-scale tasks. To address this, we evaluate various direct assessment methods and find that a simple slider protocol as the most reliable and efficient alternative using the results of IBWS as a best-desired outcome. Empirically, we demonstrate that a slider protocol closely aligns with IBWS rankings and ground truth. Furthermore, we train LTR models with collected slider annotations to automatically predict rankings, which is tested on two tasks: sentiment analysis and rating dialogue interactions. 
Our results highlight the effectiveness of the LTR models, which not only enhances the accuracy of model predictions but also reduces the time and cost associated with data collection, offering a scalable solution for many ML applications.\nThe main contributions of this study are:\nWe propose IBWS, an effective annotation collection algorithm that generates robust ranked annotations. To facilitate BWS annotations and empirically analyze the effectiveness of IBWS, we develop two interfaces: a standard two-column BWS interface and a vertical-drag interface;\nTo find a more practical alternative, we compare different direct assessment methods and identify the simple slider protocol as the most reliable, and efficient;\nWe further train LTR models to predict the annotations automatically, demonstrating their effectiveness on two tasks: sentiment analysis and rating dialogue interactions.\n###figure_1###"
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Background",
|
| 15 |
+
"text": "Three approaches are frequently used in surveys for sentiment data collection: direct assessment, pairwise ranking, and best-worst scaling (BWS)."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Methods",
|
| 21 |
+
"text": "We begin this section by introducing IBWS algorithm. Then, we discuss an LTR model to predict annotations."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Iterated Best-Worst Scaling",
|
| 27 |
+
"text": "To perform crowd-sourced ranking on BWS annotations, we develop the IBWS algorithm as explained in Algorithm 1 ###reference_###. Inspired by Quicksort (Hoare 1961 ###reference_b16###), we implement ranking by iteratively collecting annotations using BWS. We first assign all items to a single bucket from which we randomly sample 4 items without replacement. We manually label the best (max) and worst (min) elements. Motivated by quicksort comparisons on a single pivot, we perform BWS for every remaining element in the bucket: annotators are repeatedly given 4-tuples consisting of max, min and two randomly selected items, to then select a new max\u2019, min\u2019. This allows us to rank the two new items relative to the initial pair. The algorithm results in a multiplicative 3-way partition of the data after each iteration (buckets 0,1,2 as shown in Figure 4 ###reference_###). After iterations all items are placed in one of buckets. This can be considered a fine-grain ordinal scale. For example, 4 iterations of this approach leads to each element being assigned to one of 81 ordered buckets (ordinal labels).\n###figure_2### We consider two BWS interfaces to gather annotations:\nA standard BWS interface (Figure 2 ###reference_###) that presents four items sided with two columns of buttons for best and worst respectively (Potoglou et al. 2011 ###reference_b32###).\nTo better understand how the items are ranked from the annotator\u2019s perspective, annotators can indicate the relative sentiment distance between reviews on a vertical bar and rank reviews by dragging them vertically (Figure 3 ###reference_###)."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "Learning-to-Rank Model",
|
| 33 |
+
"text": "To predict the annotations from IBWS ranking, we train an automated scoring LTR model using data with annotated scores. Specifically, the model predicts an output . We sample sentence pairs (, ) where is annotated more positive than . A pairwise hinge loss with parameterized margin is used to train the model,\nwhere is the constant margin, is the annotated sentiment score and is the ranking model\u2019s score prediction function. The loss encourages the model to score higher than .\nConsidering that annotators may be more calibrated on a per-HIT or per-worker basis than on a global basis when using Amazon MTurk annotation (Chen 2020 ###reference_b5###), we design pair grouping strategies, targeting to alleviate the disagreement between annotators and the inconsistency among tasks performed by the same annotator:\nGlobal basis With n annotations, each one is paired with k randomly selected samples, maintaining a total of pairs.\nGroup by HIT Samples are grouped by the HITId to guarantee only pairs that are annotated in the same HIT by the same worker are used as training data.\nGroup by worker To reduce the impact of differences between annotators, samples are grouped by WorkerId to guarantee only pairs annotated by the same worker are used as training data."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Experiments",
|
| 39 |
+
"text": ""
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4.1",
|
| 43 |
+
"parent_section_id": "4",
|
| 44 |
+
"section_name": "Data",
|
| 45 |
+
"text": "We randomly select reviews from the Amazon product review dataset111https://nijianmo.github.io/amazon/index.html (Ni, Li, and McAuley 2019 ###reference_b29###), ranging from four different product categories: Books, Electronics, Grocery-and-Gourmet-Food, and Home-and-Kitchen. Each review covers information about the rating (1-5 stars), review text, product id, and reviewer id.\n###figure_3###"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.2",
|
| 49 |
+
"parent_section_id": "4",
|
| 50 |
+
"section_name": "Collecting Annotations",
|
| 51 |
+
"text": "We perform annotation collection with Amazon Mechanical Turk (AMT).222Full details of annotation task is in Supplementary Material.\nIn an attempt to find the most robust and reliable scalar annotation protocols, we compare the collected annotations from 7-way ordinal, slider and VAS protocols on 100 sampled product reviews. For each review, we collect 10 annotations, resulting in 1000 annotations for each protocol. Inspired by Yrizarry, Matsumoto, and Wilson-Cohn (1998 ###reference_b47###), we design dual-category protocols to examine if using separate scales for positive and negative sentiments improves annotation reliability. As shown in Figure 5 ###reference_###, these protocols allow annotators to select from either positive or negative sentiment categories (i.e., Neutral Sentiment option is also available). In total, we collect annotations through six interfaces; each review will be presented either as a single question or in a dual question format.\nWe collect 4k annotations through two BWS interfaces with 3 iterations (include 100 reviews for direct assessment); only one worker is assigned to each task. The collected annotations are then ranked into 27 buckets and normalized to a [0, 1] scale (0 represents the most negative)."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.3",
|
| 55 |
+
"parent_section_id": "4",
|
| 56 |
+
"section_name": "Training the LTR Model",
|
| 57 |
+
"text": "The best scalar protocol determined from scalar annotation experiment is used to annotate training data: we collect another 4k annotations with 3-way redundancy to train an LTR model by fine-tuning the pre-trained RoBERTa base model.333Training details are in Supplementary Material.\nWe evaluate the performance of RoBERTa-LTR models by computing the Spearman\u2019s rank correlation () (Spearman 1904 ###reference_b40###) between IBWS ranking and LTR predictions to test how closely the model\u2019s rankings align with the IBWS annotations. Intra-class correlation coefficient (ICC) (Shrout and Fleiss 1979 ###reference_b39###) is used to evaluate the reliability of annotators."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.4",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "Dialogue System Evaluation Experiments",
|
| 63 |
+
"text": "###figure_4### The dialogue data consists of 200 contexts and 40 responses for each context, for a total of 8000 context-response pairs. Each context has two conversational partners (A and B) speaking in turn, with A\u2019s sentence first, then B\u2019s response to A, and A\u2019s response to B (i.e. A-B-A). Each response is either a human-generated or a model-generated response from B to the last line of the conversation. For each context, 9 of the responses are written by humans, and 31 of the responses are generated by models. We use CakeChat444https://replika.ai/; DialoGPT (medium) (Zhang et al. 2020 ###reference_b48###); ConvAI2 (KV-MemNN) (Dinan et al. 2019 ###reference_b9###); Blender (single turn); Blender 2.7B (Roller et al. 2020 ###reference_b33###) with Person; ParlAI (Twitter 2); ParlAI (controllable) (See et al. 2019 ###reference_b37###); and Plato-2 (Bao et al. 2021 ###reference_b2###) (24 separate responses, from temperature ; top beam search size or top beam search ; and 2 responses per set of model parameters).\nSlider protocol is used to annotate the context-response pairs with the same setup. A subset of 2k context-response pairs was annotated with 3-way redundancy, while the rest (6k) were annotated without redundancy.\nThe same model is used to train on the context-response pairs. The context-response pairs were spliced with RoBERTa\u2019s sentence separator token to form training and evaluation items. A total of 16 models are trained, as described below. Of the 200 contexts, 120 are set aside for training, 40 for the dev set, and 40 for the test set. Of the training context-response items, for each of the models, half of the items are chosen by one of the following data splits: the response split, which has 60 contexts and 40 responses per context; the context split, which has 120 contexts and 20 responses per context; the worker95 split, which contains a random sample of items after filtering out annotations from the bottom 5% of workers; or the worker80 split, which contains a random sample of items after filtering out annotations from the bottom 20% of workers. The \u201cbottom\u201d percentage of workers is determined as follows: for each worker, a correlation score can be computed for each context-response pair that was in the subset of pairs annotated with redundancy; in particular, the correlation between the worker\u2019s annotations and the mean of the other two worker\u2019s annotations for each redundant pair is computed. The workers are then sorted by their correlation scores, and the annotations of the workers in the bottom 5% or 20% are filtered. For each data split, 4 models are trained on the same pairwise hinge loss function, except instead of grouping by the HITId, samples are grouped by the ContextId.\n###figure_5### ###figure_6### ###figure_7###"
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "5",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "Results and Analysis",
|
| 69 |
+
"text": ""
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "5.1",
|
| 73 |
+
"parent_section_id": "5",
|
| 74 |
+
"section_name": "Effectiveness of IBWS",
|
| 75 |
+
"text": "To evaluate the reliability of IBWS and confirm its inheritance of BWS\u2019s robustness, we compute the Spearman\u2019s correlation between the rankings generated from IBWS and the average true ordinal labels within each bucket, as depicted in Figure 7 ###reference_###. The observed consistent, monotonically increasing trend across all product types confirms that IBWS effectively ranks the reviews as intended.\nHowever, several factors contribute to why the plots are not perfectly sorted: 1) poor-quality responses; 2) annotators might focus on different aspects than the ground-truth ratings (e.g., prioritizing certain attributes that differ from those emphasized by other reviewers); and 3) the buckets may not align in a strictly linear fashion with the ground truth ratings.\nBy comparing the results from the standard two-column BWS interface and the vertical-drag interface (See Supplementary), we find that annotators performed better with the standard two-column setup. Although both interfaces produce a monotonic relationship, the annotations from the standard two-column interface show less variance relative to the true ordinal ratings in each bucket. Additionally, the vertical-drag interface results in more outliers being misclassified."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "5.2",
|
| 79 |
+
"parent_section_id": "5",
|
| 80 |
+
"section_name": "Rating Scale Performance",
|
| 81 |
+
"text": "To examine how consistently we get similar rankings from every direct assessment protocol, we employ the random split-half (Kiritchenko and Mohammad 2017 ###reference_b18###) with Spearman\u2019s correlation score. Specifically, for each review, we randomly sample two annotations out of ten to form lists A and B respectively. Ties in the resulting rankings are broken by adding a small amount of random noise, and Spearman\u2019s correlation is computed between A and B. As illustrated in Figure 8 ###reference_###, single slider and single ordinal protocols yield the highest consistency.\nTable 1 ###reference_### presents the ICC across various scalar annotation protocols. The results align with the findings from Figure 8 ###reference_###; the single-category ordinal and slider interfaces perform more reliably and efficiently than the others.\nTable 2 ###reference_### compares the correlation of each scalar annotations (i.e., the mean of 10 redundant annotations) with the ground truth values from the original Amazon review dataset. All three single-category interfaces outperform the dual-category ones, with the single slider and ordinal scales showing better correlation than the VAS scale. Additionally, as shown in Figure 9 ###reference_###, the single slider annotations are most concentrated along the diagonal, indicating the strongest alignment with the ground-truth labels.\n###figure_8### ###figure_9### ###figure_10### Figure 10 ###reference_0### illustrates the correlation between IBWS-ranked annotations and scalar annotations, where a single annotation is randomly selected from the 10 redundant annotations for each review. At zero redundancy (i.e., when only one annotator\u2019s input is considered), the slider interfaces show noticeably higher correlations compared to the other two types. We also find that when redundancy increases, the correlation gradually increases across all scalar protocols, and all product types, as shown in Figure 11 ###reference_1###.\nFigure 12 ###reference_2### plots the annotation time taken by workers to rate 5 reviews across all scalar interfaces. Overall, the single slider method was most efficient for the sentiment annotation task. The dual interfaces all took longer than their single counterparts."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "5.3",
|
| 85 |
+
"parent_section_id": "5",
|
| 86 |
+
"section_name": "Model Performance",
|
| 87 |
+
"text": "Table 4 ###reference_### presents the performance of the RoBERTa-LTR model trained on different sizes of sentiment annotations collected from the slider protocol (500, 1k, 2k, and 4k) and tested on the 4k IBWS annotations. We observe that as the number of training annotations increases, the prediction accuracy improves, and the performance gap between the three pairwise strategies narrows. When the training dataset is small, the global pairwise strategy significantly outperforms the other two settings, likely due to the difference in the number of training pairs. However, this approach requires six times more training time. Once the model is trained on more than 2,000 annotations, all approaches\u2014global, per-context, and per-worker\u2014achieve a correlation accuracy above 0.7, indicating the model is well-trained to predict rankings.\nTable 3 ###reference_### shows that the RoBERTa-LTR model achieves a Spearman\u2019s correlation of 0\u0303.3 on dialogue annotations, which is significantly lower than on sentiment data. However, the inter-annotator correlation on the redundantly-annotated subset is also 0.3, implying that the models are approaching human performance.\nWith regards to the performance between the dialogue models, across the various data splits, the per-worker models tend to perform on par with the global models, while the pointwise models perform on par with the per-context models, with the latter pair outperforming the former. The data split is not found to have a significant effect on model performance, especially with the random noise in performance from the randomized order of the training inputs."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "6",
|
| 91 |
+
"parent_section_id": null,
|
| 92 |
+
"section_name": "Related Work",
|
| 93 |
+
"text": ""
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "7",
|
| 97 |
+
"parent_section_id": null,
|
| 98 |
+
"section_name": "Conclusion",
|
| 99 |
+
"text": "Best Worst Scaling (BWS) is a respected annotation procedure on small datasets. We introduced Iterated BWS as a robust method for crowdsourced annotation of larger collections. While robust, IBWS requires repeated consideration of each element in the collection: iterations translates to times the cost. We illustrated that a direct scalar assessment of each element using a slider protocol allows for significantly more efficient annotation, while giving similar results to IBWS.\nThese annotations support training automated pairwise ranking models: in both sentiment analysis and dialogue tasks, the LTR models effectively predict rankings on par with human annotations.\nTo our knowledge, this study is the first to directly consider the widely regarded BWS protocol in the context of large datasets and with an eye to practical considerations of annotation costs. Our results support the conclusion that researchers can comfortably rely on a direct scalar assessment protocol as a more efficient and similarly robust approach."
|
| 100 |
+
}
|
| 101 |
+
],
|
| 102 |
+
"appendix": [],
|
| 103 |
+
"tables": {
|
| 104 |
+
"1": {
|
| 105 |
+
"table_html": "<figure class=\"ltx_table\" id=\"Sx4.T1\">\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"Sx4.T1.1\" style=\"width:433.6pt;height:226.8pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(96.4pt,-50.4pt) scale(1.79987302340099,1.79987302340099) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"Sx4.T1.1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"Sx4.T1.1.1.1.1\">\n<th class=\"ltx_td ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"Sx4.T1.1.1.1.1.1\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"Sx4.T1.1.1.1.1.2\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"Sx4.T1.1.1.1.1.3\">ICC1</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"Sx4.T1.1.1.1.1.4\">ICC3</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"Sx4.T1.1.1.1.1.5\">ICC1k</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"Sx4.T1.1.1.1.1.6\">ICC3k</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"Sx4.T1.1.1.2.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_t\" id=\"Sx4.T1.1.1.2.1.1\">Single</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx4.T1.1.1.2.1.2\">Ordinal</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T1.1.1.2.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T1.1.1.2.1.3.1\" style=\"background-color:#F2E6F2;\">0.74</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T1.1.1.2.1.4\">0.77</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T1.1.1.2.1.5\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T1.1.1.2.1.5.1\" style=\"background-color:#F2E6F2;\">0.96</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T1.1.1.2.1.6\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T1.1.1.2.1.6.1\" style=\"background-color:#F2E6F2;\">0.97</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T1.1.1.3.2\">\n<th class=\"ltx_td ltx_th ltx_th_row\" id=\"Sx4.T1.1.1.3.2.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T1.1.1.3.2.2\">Slider</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T1.1.1.3.2.3\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T1.1.1.3.2.3.1\" style=\"background-color:#F2E6F2;\">0.74</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T1.1.1.3.2.4\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T1.1.1.3.2.4.1\" style=\"background-color:#F2E6F2;\">0.78</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T1.1.1.3.2.5\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T1.1.1.3.2.5.1\" style=\"background-color:#F2E6F2;\">0.96</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T1.1.1.3.2.6\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T1.1.1.3.2.6.1\" style=\"background-color:#F2E6F2;\">0.97</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T1.1.1.4.3\">\n<th class=\"ltx_td ltx_th ltx_th_row\" id=\"Sx4.T1.1.1.4.3.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T1.1.1.4.3.2\">VAS</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T1.1.1.4.3.3\">0.64</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T1.1.1.4.3.4\">0.68</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T1.1.1.4.3.5\">0.94</td>\n<td class=\"ltx_td ltx_align_center\" 
id=\"Sx4.T1.1.1.4.3.6\">0.95</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T1.1.1.5.4\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_t\" id=\"Sx4.T1.1.1.5.4.1\">Dual</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"Sx4.T1.1.1.5.4.2\">Ordinal</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T1.1.1.5.4.3\">0.60</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T1.1.1.5.4.4\">0.62</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T1.1.1.5.4.5\">0.92</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx4.T1.1.1.5.4.6\">0.92</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T1.1.1.6.5\">\n<th class=\"ltx_td ltx_th ltx_th_row\" id=\"Sx4.T1.1.1.6.5.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T1.1.1.6.5.2\">Slider</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T1.1.1.6.5.3\">0.65</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T1.1.1.6.5.4\">0.66</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T1.1.1.6.5.5\">0.94</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T1.1.1.6.5.6\">0.95</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T1.1.1.7.6\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_bb\" id=\"Sx4.T1.1.1.7.6.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_r\" id=\"Sx4.T1.1.1.7.6.2\">VAS</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"Sx4.T1.1.1.7.6.3\">0.65</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"Sx4.T1.1.1.7.6.4\">0.66</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"Sx4.T1.1.1.7.6.5\">0.93</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"Sx4.T1.1.1.7.6.6\">0.94</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>ICC scores on annotations across all scalar protocols.</figcaption>\n</figure>",
|
| 106 |
+
"capture": "Table 1: ICC scores on annotations across all scalar protocols."
|
| 107 |
+
},
|
| 108 |
+
"2": {
|
| 109 |
+
"table_html": "<figure class=\"ltx_table\" id=\"Sx4.T2\">\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"Sx4.T2.3\" style=\"width:433.6pt;height:93.3pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(91.3pt,-19.6pt) scale(1.72756686606203,1.72756686606203) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"Sx4.T2.3.1\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"Sx4.T2.3.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_tt\" colspan=\"3\" id=\"Sx4.T2.3.1.1.1.1\">Single</th>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" colspan=\"3\" id=\"Sx4.T2.3.1.1.1.2\">Dual</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T2.3.1.2.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row\" id=\"Sx4.T2.3.1.2.2.1\">Ordinal</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row\" id=\"Sx4.T2.3.1.2.2.2\">Slider</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"Sx4.T2.3.1.2.2.3\">VAS</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T2.3.1.2.2.4\">Ordinal</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T2.3.1.2.2.5\">Slider</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx4.T2.3.1.2.2.6\">VAS</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx4.T2.3.1.3.3\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_t\" id=\"Sx4.T2.3.1.3.3.1\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T2.3.1.3.3.1.1\" style=\"background-color:#F2E6F2;\">0.881</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_t\" id=\"Sx4.T2.3.1.3.3.2\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx4.T2.3.1.3.3.2.1\" style=\"background-color:#F2E6F2;\">0.881</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_r ltx_border_t\" id=\"Sx4.T2.3.1.3.3.3\">0.877</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"Sx4.T2.3.1.3.3.4\">0.828</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"Sx4.T2.3.1.3.3.5\">0.872</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"Sx4.T2.3.1.3.3.6\">0.879</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Spearman correlation () between scalar annotations and true labels.</figcaption>\n</figure>",
|
| 110 |
+
"capture": "Table 2: Spearman correlation () between scalar annotations and true labels."
|
| 111 |
+
},
|
| 112 |
+
"3": {
|
| 113 |
+
"table_html": "<figure class=\"ltx_table\" id=\"Sx5.T3\">\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"Sx5.T3.3\" style=\"width:346.9pt;height:99.1pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-15.5pt,4.4pt) scale(0.918053281043585,0.918053281043585) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"Sx5.T3.3.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"Sx5.T3.3.1.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"Sx5.T3.3.1.1.1.1\">Model / Data Split</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"2\" id=\"Sx5.T3.3.1.1.1.2\">response</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"2\" id=\"Sx5.T3.3.1.1.1.3\">context</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"2\" id=\"Sx5.T3.3.1.1.1.4\">worker95</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"2\" id=\"Sx5.T3.3.1.1.1.5\">worker80</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx5.T3.3.1.2.2\">\n<th class=\"ltx_td ltx_th ltx_th_row\" id=\"Sx5.T3.3.1.2.2.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"Sx5.T3.3.1.2.2.2\">dev</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"Sx5.T3.3.1.2.2.3\">test</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"Sx5.T3.3.1.2.2.4\">dev</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"Sx5.T3.3.1.2.2.5\">test</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"Sx5.T3.3.1.2.2.6\">dev</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"Sx5.T3.3.1.2.2.7\">test</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"Sx5.T3.3.1.2.2.8\">dev</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"Sx5.T3.3.1.2.2.9\">test</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"Sx5.T3.3.1.3.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"Sx5.T3.3.1.3.1.1\">pointwise</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx5.T3.3.1.3.1.2\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx5.T3.3.1.3.1.2.1\" style=\"background-color:#F2E6F2;\">35.04</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx5.T3.3.1.3.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx5.T3.3.1.3.1.3.1\" style=\"background-color:#F2E6F2;\">32.17</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx5.T3.3.1.3.1.4\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx5.T3.3.1.3.1.4.1\" style=\"background-color:#F2E6F2;\">35.91</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx5.T3.3.1.3.1.5\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx5.T3.3.1.3.1.5.1\" style=\"background-color:#F2E6F2;\">31.68</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx5.T3.3.1.3.1.6\">32.84</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx5.T3.3.1.3.1.7\">30.41</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx5.T3.3.1.3.1.8\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx5.T3.3.1.3.1.8.1\" style=\"background-color:#F2E6F2;\">35.59</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"Sx5.T3.3.1.3.1.9\">31.93</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx5.T3.3.1.4.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" 
id=\"Sx5.T3.3.1.4.2.1\">global</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.4.2.2\">27.92</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.4.2.3\">26.88</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.4.2.4\">30.20</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.4.2.5\">28.33</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.4.2.6\">31.63</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.4.2.7\">26.81</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.4.2.8\">31.97</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.4.2.9\">31.26</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx5.T3.3.1.5.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"Sx5.T3.3.1.5.3.1\">per-worker</th>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.5.3.2\">29.46</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.5.3.3\">26.87</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.5.3.4\">33.52</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.5.3.5\">29.64</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.5.3.6\">34.09</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.5.3.7\">29.36</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.5.3.8\">32.12</td>\n<td class=\"ltx_td ltx_align_center\" id=\"Sx5.T3.3.1.5.3.9\">29.79</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx5.T3.3.1.6.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"Sx5.T3.3.1.6.4.1\">per-context</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"Sx5.T3.3.1.6.4.2\">33.16</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"Sx5.T3.3.1.6.4.3\">30.92</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"Sx5.T3.3.1.6.4.4\">35.59</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"Sx5.T3.3.1.6.4.5\">30.93</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"Sx5.T3.3.1.6.4.6\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx5.T3.3.1.6.4.6.1\" style=\"background-color:#F2E6F2;\">35.50</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"Sx5.T3.3.1.6.4.7\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx5.T3.3.1.6.4.7.1\" style=\"background-color:#F2E6F2;\">31.97</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"Sx5.T3.3.1.6.4.8\">35.53</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"Sx5.T3.3.1.6.4.9\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx5.T3.3.1.6.4.9.1\" style=\"background-color:#F2E6F2;\">31.99</span></td>\n</tr>\n</tbody>\n</table>\n</span></div>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Spearman\u2019s correlation () of RoBERTa-LTR models trained and evaluated on dialogue data.</figcaption>\n</figure>",
|
| 114 |
+
"capture": "Table 3: Spearman\u2019s correlation () of RoBERTa-LTR models trained and evaluated on dialogue data."
|
| 115 |
+
},
|
| 116 |
+
"4": {
|
| 117 |
+
"table_html": "<figure class=\"ltx_table\" id=\"Sx5.T4\">\n<div class=\"ltx_inline-block ltx_transformed_outer\" id=\"Sx5.T4.3\" style=\"width:433.6pt;height:125.3pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(92.2pt,-26.6pt) scale(1.73969609564223,1.73969609564223) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"Sx5.T4.3.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"Sx5.T4.3.1.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"Sx5.T4.3.1.1.1.1\">Model / Training size</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"Sx5.T4.3.1.1.1.2\">500</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"Sx5.T4.3.1.1.1.3\">1000</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"Sx5.T4.3.1.1.1.4\">2000</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt\" id=\"Sx5.T4.3.1.1.1.5\">4000</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"Sx5.T4.3.1.2.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"Sx5.T4.3.1.2.1.1\">global</th>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"Sx5.T4.3.1.2.1.2\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx5.T4.3.1.2.1.2.1\" style=\"background-color:#F2E6F2;\">66.29</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"Sx5.T4.3.1.2.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx5.T4.3.1.2.1.3.1\" style=\"background-color:#F2E6F2;\">69.94</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"Sx5.T4.3.1.2.1.4\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx5.T4.3.1.2.1.4.1\" style=\"background-color:#F2E6F2;\">71.86</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"Sx5.T4.3.1.2.1.5\"><span class=\"ltx_text ltx_font_bold\" id=\"Sx5.T4.3.1.2.1.5.1\" style=\"background-color:#F2E6F2;\">72.56</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx5.T4.3.1.3.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"Sx5.T4.3.1.3.2.1\">per-HIT</th>\n<td class=\"ltx_td ltx_align_left\" id=\"Sx5.T4.3.1.3.2.2\">60.43</td>\n<td class=\"ltx_td ltx_align_left\" id=\"Sx5.T4.3.1.3.2.3\">65.57</td>\n<td class=\"ltx_td ltx_align_left\" id=\"Sx5.T4.3.1.3.2.4\">71.58</td>\n<td class=\"ltx_td ltx_align_left\" id=\"Sx5.T4.3.1.3.2.5\">72.52</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"Sx5.T4.3.1.4.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"Sx5.T4.3.1.4.3.1\">per-worker</th>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"Sx5.T4.3.1.4.3.2\">59.45</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"Sx5.T4.3.1.4.3.3\">64.39</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"Sx5.T4.3.1.4.3.4\">70.92</td>\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"Sx5.T4.3.1.4.3.5\">72.18</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 4: </span>Spearman\u2019s correlation () of RoBERTa-LTR model predictions, evaluated on IBWS sentiment annotations.</figcaption>\n</figure>",
|
| 118 |
+
"capture": "Table 4: Spearman\u2019s correlation () of RoBERTa-LTR model predictions, evaluated on IBWS sentiment annotations."
|
| 119 |
+
}
|
| 120 |
+
},
|
| 121 |
+
"image_paths": {
|
| 122 |
+
"1": {
|
| 123 |
+
"figure_path": "2408.09765v1_figure_1.png",
|
| 124 |
+
"caption": "Figure 1: Direct assessment protocols for sentiment.",
|
| 125 |
+
"url": "http://arxiv.org/html/2408.09765v1/extracted/5799475/fig/exp1-1.png"
|
| 126 |
+
},
|
| 127 |
+
"2": {
|
| 128 |
+
"figure_path": "2408.09765v1_figure_2.png",
|
| 129 |
+
"caption": "Figure 2: BWS protocol on Amazon review sentiment.",
|
| 130 |
+
"url": "http://arxiv.org/html/2408.09765v1/extracted/5799475/fig/bws_interface.png"
|
| 131 |
+
},
|
| 132 |
+
"3": {
|
| 133 |
+
"figure_path": "2408.09765v1_figure_3.png",
|
| 134 |
+
"caption": "Figure 3: Vert-drag BWS interface.",
|
| 135 |
+
"url": "http://arxiv.org/html/2408.09765v1/x1.png"
|
| 136 |
+
},
|
| 137 |
+
"4": {
|
| 138 |
+
"figure_path": "2408.09765v1_figure_4.png",
|
| 139 |
+
"caption": "Figure 4: An illustration of IBWS algorithm.",
|
| 140 |
+
"url": "http://arxiv.org/html/2408.09765v1/extracted/5799475/fig/bws_explanation.png"
|
| 141 |
+
},
|
| 142 |
+
"5": {
|
| 143 |
+
"figure_path": "2408.09765v1_figure_5.png",
|
| 144 |
+
"caption": "Figure 5: Likert Style, dual-question Protocols.",
|
| 145 |
+
"url": "http://arxiv.org/html/2408.09765v1/extracted/5799475/fig/exp1example.png"
|
| 146 |
+
},
|
| 147 |
+
"6": {
|
| 148 |
+
"figure_path": "2408.09765v1_figure_6.png",
|
| 149 |
+
"caption": "Figure 6: LTR model on dialogue system outputs.",
|
| 150 |
+
"url": "http://arxiv.org/html/2408.09765v1/extracted/5799475/fig/dialogue-model.png"
|
| 151 |
+
},
|
| 152 |
+
"7": {
|
| 153 |
+
"figure_path": "2408.09765v1_figure_7.png",
|
| 154 |
+
"caption": "Figure 7: Normalized IBWS annotations correlate with average ground truth labels.",
|
| 155 |
+
"url": "http://arxiv.org/html/2408.09765v1/extracted/5799475/fig/ibws_effective.png"
|
| 156 |
+
},
|
| 157 |
+
"8": {
|
| 158 |
+
"figure_path": "2408.09765v1_figure_8.png",
|
| 159 |
+
"caption": "Figure 8: Spearman\u2019s correlation of random split-half rankings. From top to bottom: single slider, single ordinal, dual slider, dual VAS, single VAS, dual ordinal.",
|
| 160 |
+
"url": "http://arxiv.org/html/2408.09765v1/extracted/5799475/fig/split-half.png"
|
| 161 |
+
},
|
| 162 |
+
"9": {
|
| 163 |
+
"figure_path": "2408.09765v1_figure_9.png",
|
| 164 |
+
"caption": "Figure 9: Heatmaps of annotated rating score correlating with ground truth across the scalar interfaces.",
|
| 165 |
+
"url": "http://arxiv.org/html/2408.09765v1/extracted/5799475/fig/scalar_truth.png"
|
| 166 |
+
},
|
| 167 |
+
"10": {
|
| 168 |
+
"figure_path": "2408.09765v1_figure_10.png",
|
| 169 |
+
"caption": "Figure 10: Spearman\u2019s correlation (\u03c1\ud835\udf0c\\rhoitalic_\u03c1) across scalar interfaces with IBWS annotations at zero redundancy (AR = 1).",
|
| 170 |
+
"url": "http://arxiv.org/html/2408.09765v1/extracted/5799475/fig/no-redundancy.png"
|
| 171 |
+
},
|
| 172 |
+
"11": {
|
| 173 |
+
"figure_path": "2408.09765v1_figure_11.png",
|
| 174 |
+
"caption": "Figure 11: Median Spearman\u2019s correlation (\u03c1\ud835\udf0c\\rhoitalic_\u03c1) between IBWS and scalar annotations across protocols and product types.",
|
| 175 |
+
"url": "http://arxiv.org/html/2408.09765v1/extracted/5799475/fig/corr_ptype.png"
|
| 176 |
+
},
|
| 177 |
+
"12": {
|
| 178 |
+
"figure_path": "2408.09765v1_figure_12.png",
|
| 179 |
+
"caption": "Figure 12: Work time of scalar interfaces in seconds.",
|
| 180 |
+
"url": "http://arxiv.org/html/2408.09765v1/extracted/5799475/fig/scalar_worktime2.png"
|
| 181 |
+
}
|
| 182 |
+
},
|
| 183 |
+
"validation": true,
|
| 184 |
+
"references": [
|
| 185 |
+
{
|
| 186 |
+
"1": {
|
| 187 |
+
"title": "The use of rating and Likert scales in Natural Language Generation human evaluation tasks: A review and some recommendations.",
|
| 188 |
+
"author": "Amidei, J.; Piwek, P.; and Willis, A. 2019.",
|
| 189 |
+
"venue": "In INLG.",
|
| 190 |
+
"url": null
|
| 191 |
+
}
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"2": {
|
| 195 |
+
"title": "PLATO-2: Towards Building an Open-Domain Chatbot via Curriculum Learning.",
|
| 196 |
+
"author": "Bao, S.; He, H.; Wang, F.; Wu, H.; Wang, H.; Wu, W.; Guo, Z.; Liu, Z.; and Xu, X. 2021.",
|
| 197 |
+
"venue": "arXiv:2006.16779.",
|
| 198 |
+
"url": null
|
| 199 |
+
}
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"3": {
|
| 203 |
+
"title": "Discrete vs. Continuous Rating Scales for Language Evaluation in NLP.",
|
| 204 |
+
"author": "Belz, A.; and Kow, E. 2011.",
|
| 205 |
+
"venue": "In Lin, D.; Matsumoto, Y.; and Mihalcea, R., eds., Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, 230\u2013235. Portland, Oregon, USA: Association for Computational Linguistics.",
|
| 206 |
+
"url": null
|
| 207 |
+
}
|
| 208 |
+
},
|
| 209 |
+
{
|
| 210 |
+
"4": {
|
| 211 |
+
"title": "Best-worst scaling improves measurement of first impressions.",
|
| 212 |
+
"author": "Burton, N.; Burton, M.; Rigby, D.; Sutherland, C.; and Rhodes, G. 2019.",
|
| 213 |
+
"venue": "Cognitive Research: Principles and Implications, 4(1).",
|
| 214 |
+
"url": null
|
| 215 |
+
}
|
| 216 |
+
},
|
| 217 |
+
{
|
| 218 |
+
"5": {
|
| 219 |
+
"title": "RANKING AND RETRIEVAL UNDER SEMANTIC RELEVANCE.",
|
| 220 |
+
"author": "Chen, T. 2020.",
|
| 221 |
+
"venue": "Ph.D. thesis, Johns Hopkins University.",
|
| 222 |
+
"url": null
|
| 223 |
+
}
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"6": {
|
| 227 |
+
"title": "Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models.",
|
| 228 |
+
"author": "Chen, Z.; Deng, Y.; Yuan, H.; Ji, K.; and Gu, Q. 2024.",
|
| 229 |
+
"venue": "arXiv:2401.01335.",
|
| 230 |
+
"url": null
|
| 231 |
+
}
|
| 232 |
+
},
|
| 233 |
+
{
|
| 234 |
+
"7": {
|
| 235 |
+
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding.",
|
| 236 |
+
"author": "Devlin, J.; Chang, M.-W.; Lee, K.; and Toutanova, K. 2019a.",
|
| 237 |
+
"venue": "arXiv:1810.04805.",
|
| 238 |
+
"url": null
|
| 239 |
+
}
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"8": {
|
| 243 |
+
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding.",
|
| 244 |
+
"author": "Devlin, J.; Chang, M.-W.; Lee, K.; and Toutanova, K. 2019b.",
|
| 245 |
+
"venue": "arXiv:1810.04805.",
|
| 246 |
+
"url": null
|
| 247 |
+
}
|
| 248 |
+
},
|
| 249 |
+
{
|
| 250 |
+
"9": {
|
| 251 |
+
"title": "The Second Conversational Intelligence Challenge (ConvAI2).",
|
| 252 |
+
"author": "Dinan, E.; Logacheva, V.; Malykh, V.; Miller, A.; Shuster, K.; Urbanek, J.; Kiela, D.; Szlam, A.; Serban, I.; Lowe, R.; Prabhumoye, S.; Black, A. W.; Rudnicky, A.; Williams, J.; Pineau, J.; Burtsev, M.; and Weston, J. 2019.",
|
| 253 |
+
"venue": "arXiv:1902.00098.",
|
| 254 |
+
"url": null
|
| 255 |
+
}
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"10": {
|
| 259 |
+
"title": "Rapid Adaptation of NE Resolvers for Humanities Domains using Active Annotation.",
|
| 260 |
+
"author": "Ekbal, A.; Bonin, F.; Saha, S.; Stemle, E.; Barbu, E.; Cavulli, F.; Girardi, C.; and Poesio, M. 2011.",
|
| 261 |
+
"venue": "Language Technology and Computational Linguistics.",
|
| 262 |
+
"url": null
|
| 263 |
+
}
|
| 264 |
+
},
|
| 265 |
+
{
|
| 266 |
+
"11": {
|
| 267 |
+
"title": "Best\u2013worst scaling: What it can do for health care research and how to do it.",
|
| 268 |
+
"author": "Flynn, T. N.; Louviere, J. J.; Peters, T. J.; and Coast, J. 2007.",
|
| 269 |
+
"venue": "Journal of Health Economics, 26(1): 171 \u2013 189.",
|
| 270 |
+
"url": null
|
| 271 |
+
}
|
| 272 |
+
},
|
| 273 |
+
{
|
| 274 |
+
"12": {
|
| 275 |
+
"title": "Language Modelling via Learning to Rank.",
|
| 276 |
+
"author": "Frydenlund, A.; Singh, G.; and Rudzicz, F. 2022.",
|
| 277 |
+
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence, 36(10): 10636\u201310644.",
|
| 278 |
+
"url": null
|
| 279 |
+
}
|
| 280 |
+
},
|
| 281 |
+
{
|
| 282 |
+
"13": {
|
| 283 |
+
"title": "The Future of Survey Self-report: An experiment contrasting Likert, VAS, Slide, and Swipe touch interfaces.",
|
| 284 |
+
"author": "Fryer, L.; and Nakao, K. 2020.",
|
| 285 |
+
"venue": "FRONTLINE LEARNING RESEARCH, 10\u201325.",
|
| 286 |
+
"url": null
|
| 287 |
+
}
|
| 288 |
+
},
|
| 289 |
+
{
|
| 290 |
+
"14": {
|
| 291 |
+
"title": "The Viability of Best-worst Scaling and Categorical Data Label Annotation Tasks in Detecting Implicit Bias.",
|
| 292 |
+
"author": "Glenn, P.; Jacobs, C. L.; Thielk, M.; and Chu, Y. 2022.",
|
| 293 |
+
"venue": "In Abercrombie, G.; Basile, V.; Tonelli, S.; Rieser, V.; and Uma, A., eds., Proceedings of the 1st Workshop on Perspectivist Approaches to NLP @LREC2022, 32\u201336. Marseille, France: European Language Resources Association.",
|
| 294 |
+
"url": null
|
| 295 |
+
}
|
| 296 |
+
},
|
| 297 |
+
{
|
| 298 |
+
"15": {
|
| 299 |
+
"title": "Learning-to-Rank with BERT in TF-Ranking.",
|
| 300 |
+
"author": "Han, S.; Wang, X.; Bendersky, M.; and Najork, M. 2020.",
|
| 301 |
+
"venue": "arXiv:2004.08476.",
|
| 302 |
+
"url": null
|
| 303 |
+
}
|
| 304 |
+
},
|
| 305 |
+
{
|
| 306 |
+
"16": {
|
| 307 |
+
"title": "Algorithm 64, Quicksort.",
|
| 308 |
+
"author": "Hoare, T. 1961.",
|
| 309 |
+
"venue": "Communications of The ACM, 4.",
|
| 310 |
+
"url": null
|
| 311 |
+
}
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"17": {
|
| 315 |
+
"title": "Ridit analysis, a review.",
|
| 316 |
+
"author": "Jansen, M. E. 1984.",
|
| 317 |
+
"venue": "Statistica Neerlandica, 38(3): 141\u2013158.",
|
| 318 |
+
"url": null
|
| 319 |
+
}
|
| 320 |
+
},
|
| 321 |
+
{
|
| 322 |
+
"18": {
|
| 323 |
+
"title": "Best-Worst Scaling More Reliable than Rating Scales: A Case Study on Sentiment Intensity Annotation.",
|
| 324 |
+
"author": "Kiritchenko, S.; and Mohammad, S. 2017.",
|
| 325 |
+
"venue": "In Barzilay, R.; and Kan, M.-Y., eds., Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), 465\u2013470. Vancouver, Canada: Association for Computational Linguistics.",
|
| 326 |
+
"url": null
|
| 327 |
+
}
|
| 328 |
+
},
|
| 329 |
+
{
|
| 330 |
+
"19": {
|
| 331 |
+
"title": "Common Law Annotations: Investigating the Stability of Dialog System Output Annotations.",
|
| 332 |
+
"author": "Lee, S.; DeLucia, A.; Nangia, N.; Ganedi, P.; Guan, R.; Li, R.; Ngaw, B.; Singhal, A.; Vaidya, S.; Yuan, Z.; Zhang, L.; and Sedoc, J. 2023.",
|
| 333 |
+
"venue": "In Rogers, A.; Boyd-Graber, J.; and Okazaki, N., eds., Findings of the Association for Computational Linguistics: ACL 2023, 12315\u201312349. Toronto, Canada: Association for Computational Linguistics.",
|
| 334 |
+
"url": null
|
| 335 |
+
}
|
| 336 |
+
},
|
| 337 |
+
{
|
| 338 |
+
"20": {
|
| 339 |
+
"title": "Learning to Rank in Generative Retrieval.",
|
| 340 |
+
"author": "Li, Y.; Yang, N.; Wang, L.; Wei, F.; and Li, W. 2023.",
|
| 341 |
+
"venue": "arXiv:2306.15222.",
|
| 342 |
+
"url": null
|
| 343 |
+
}
|
| 344 |
+
},
|
| 345 |
+
{
|
| 346 |
+
"21": {
|
| 347 |
+
"title": "Rich Human Feedback for Text-to-Image Generation.",
|
| 348 |
+
"author": "Liang, Y.; He, J.; Li, G.; Li, P.; Klimovskiy, A.; Carolan, N.; Sun, J.; Pont-Tuset, J.; Young, S.; Yang, F.; Ke, J.; Dvijotham, K. D.; Collins, K.; Luo, Y.; Li, Y.; Kohlhoff, K. J.; Ramachandran, D.; and Navalpakkam, V. 2024.",
|
| 349 |
+
"venue": "arXiv:2312.10240.",
|
| 350 |
+
"url": null
|
| 351 |
+
}
|
| 352 |
+
},
|
| 353 |
+
{
|
| 354 |
+
"22": {
|
| 355 |
+
"title": "A technique for the measurement of attitudes.",
|
| 356 |
+
"author": "Likert, R. 1987.",
|
| 357 |
+
"venue": "Archives of Psychology, Columbia University.",
|
| 358 |
+
"url": null
|
| 359 |
+
}
|
| 360 |
+
},
|
| 361 |
+
{
|
| 362 |
+
"23": {
|
| 363 |
+
"title": "Learning to rank for information retrieval.",
|
| 364 |
+
"author": "Liu, T.-Y.; et al. 2009.",
|
| 365 |
+
"venue": "Foundations and Trends\u00ae in Information Retrieval, 3(3): 225\u2013331.",
|
| 366 |
+
"url": null
|
| 367 |
+
}
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"24": {
|
| 371 |
+
"title": "RoBERTa: A Robustly Optimized BERT Pretraining Approach.",
|
| 372 |
+
"author": "Liu, Y.; Ott, M.; Goyal, N.; Du, J.; Joshi, M.; Chen, D.; Levy, O.; Lewis, M.; Zettlemoyer, L.; and Stoyanov, V. 2019.",
|
| 373 |
+
"venue": "arXiv:1907.11692.",
|
| 374 |
+
"url": null
|
| 375 |
+
}
|
| 376 |
+
},
|
| 377 |
+
{
|
| 378 |
+
"25": {
|
| 379 |
+
"title": "Best-Worst Scaling: Theory, Methods and Applications.",
|
| 380 |
+
"author": "Louviere, J. J.; Flynn, T. N.; and Marley, A. A. J. 1987.",
|
| 381 |
+
"venue": "Cambridge University Press.",
|
| 382 |
+
"url": null
|
| 383 |
+
}
|
| 384 |
+
},
|
| 385 |
+
{
|
| 386 |
+
"26": {
|
| 387 |
+
"title": "Model-based Unbiased Learning to Rank.",
|
| 388 |
+
"author": "Luo, D.; Zou, L.; Ai, Q.; Chen, Z.; Yin, D.; and Davison, B. D. 2023.",
|
| 389 |
+
"venue": "arXiv:2207.11785.",
|
| 390 |
+
"url": null
|
| 391 |
+
}
|
| 392 |
+
},
|
| 393 |
+
{
|
| 394 |
+
"27": {
|
| 395 |
+
"title": "Cross-Task Generalization via Natural Language Crowdsourcing Instructions.",
|
| 396 |
+
"author": "Mishra, S.; Khashabi, D.; Baral, C.; and Hajishirzi, H. 2022.",
|
| 397 |
+
"venue": "arXiv:2104.08773.",
|
| 398 |
+
"url": null
|
| 399 |
+
}
|
| 400 |
+
},
|
| 401 |
+
{
|
| 402 |
+
"28": {
|
| 403 |
+
"title": "Active Evaluation: Efficient NLG Evaluation with Few Pairwise Comparisons.",
|
| 404 |
+
"author": "Mohankumar, A. K.; and Khapra, M. M. 2022.",
|
| 405 |
+
"venue": "arXiv:2203.06063.",
|
| 406 |
+
"url": null
|
| 407 |
+
}
|
| 408 |
+
},
|
| 409 |
+
{
|
| 410 |
+
"29": {
|
| 411 |
+
"title": "Justifying Recommendations using Distantly-Labeled Reviews and Fine-Grained Aspects.",
|
| 412 |
+
"author": "Ni, J.; Li, J.; and McAuley, J. J. 2019.",
|
| 413 |
+
"venue": "In EMNLP/IJCNLP.",
|
| 414 |
+
"url": null
|
| 415 |
+
}
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"30": {
|
| 419 |
+
"title": "RankME: Reliable Human Ratings for Natural Language Generation.",
|
| 420 |
+
"author": "Novikova, J.; Du\u0161ek, O.; and Rieser, V. 2018.",
|
| 421 |
+
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers).",
|
| 422 |
+
"url": null
|
| 423 |
+
}
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"31": {
|
| 427 |
+
"title": "MaxDiff Analysis : Simple Counting , Individual-Level Logit , and HB.",
|
| 428 |
+
"author": "Orme, B. 2009.",
|
| 429 |
+
"venue": "In Sawtooth Software.",
|
| 430 |
+
"url": null
|
| 431 |
+
}
|
| 432 |
+
},
|
| 433 |
+
{
|
| 434 |
+
"32": {
|
| 435 |
+
"title": "Best\u2013worst scaling vs. discrete choice experiments: An empirical comparison using social care data.",
|
| 436 |
+
"author": "Potoglou, D.; Burge, P.; Flynn, T.; Netten, A.; Malley, J.; Forder, J.; and Brazier, J. E. 2011.",
|
| 437 |
+
"venue": "Social Science & Medicine, 72(10): 1717 \u2013 1727.",
|
| 438 |
+
"url": null
|
| 439 |
+
}
|
| 440 |
+
},
|
| 441 |
+
{
|
| 442 |
+
"33": {
|
| 443 |
+
"title": "Recipes for building an open-domain chatbot.",
|
| 444 |
+
"author": "Roller, S.; Dinan, E.; Goyal, N.; Ju, D.; Williamson, M.; Liu, Y.; Xu, J.; Ott, M.; Shuster, K.; Smith, E. M.; Boureau, Y.-L.; and Weston, J. 2020.",
|
| 445 |
+
"venue": "arXiv:2004.13637.",
|
| 446 |
+
"url": null
|
| 447 |
+
}
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"34": {
|
| 451 |
+
"title": "Exploring Slider vs. Categorical Response Formats in Web-Based Surveys.",
|
| 452 |
+
"author": "Roster, C.; Lucianetti, L.; and Albaum, G. 2015.",
|
| 453 |
+
"venue": "Journal of Research Practice, 11.",
|
| 454 |
+
"url": null
|
| 455 |
+
}
|
| 456 |
+
},
|
| 457 |
+
{
|
| 458 |
+
"35": {
|
| 459 |
+
"title": "Efficient Online Scalar Annotation with Bounded Support.",
|
| 460 |
+
"author": "Sakaguchi, K.; and Van Durme, B. 2018.",
|
| 461 |
+
"venue": "In Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL).",
|
| 462 |
+
"url": null
|
| 463 |
+
}
|
| 464 |
+
},
|
| 465 |
+
{
|
| 466 |
+
"36": {
|
| 467 |
+
"title": "Towards Best Experiment Design for Evaluating Dialogue System Output.",
|
| 468 |
+
"author": "Santhanam, S.; and Shaikh, S. 2019.",
|
| 469 |
+
"venue": "In van Deemter, K.; Lin, C.; and Takamura, H., eds., Proceedings of the 12th International Conference on Natural Language Generation, 88\u201394. Tokyo, Japan: Association for Computational Linguistics.",
|
| 470 |
+
"url": null
|
| 471 |
+
}
|
| 472 |
+
},
|
| 473 |
+
{
|
| 474 |
+
"37": {
|
| 475 |
+
"title": "What makes a good conversation? How controllable attributes affect human judgments.",
|
| 476 |
+
"author": "See, A.; Roller, S.; Kiela, D.; and Weston, J. 2019.",
|
| 477 |
+
"venue": "arXiv:1902.08654.",
|
| 478 |
+
"url": null
|
| 479 |
+
}
|
| 480 |
+
},
|
| 481 |
+
{
|
| 482 |
+
"38": {
|
| 483 |
+
"title": "Simple, Robust and Optimal Ranking from Pairwise Comparisons.",
|
| 484 |
+
"author": "Shah, N. B.; and Wainwright, M. J. 2016.",
|
| 485 |
+
"venue": "arXiv:1512.08949.",
|
| 486 |
+
"url": null
|
| 487 |
+
}
|
| 488 |
+
},
|
| 489 |
+
{
|
| 490 |
+
"39": {
|
| 491 |
+
"title": "Intraclass correlations: uses in assessing rater reliability.",
|
| 492 |
+
"author": "Shrout, P.; and Fleiss, J. 1979.",
|
| 493 |
+
"venue": "Psychological bulletin, 86 2: 420\u20138.",
|
| 494 |
+
"url": null
|
| 495 |
+
}
|
| 496 |
+
},
|
| 497 |
+
{
|
| 498 |
+
"40": {
|
| 499 |
+
"title": "The Proof and Measurement of Association Between Two Things.",
|
| 500 |
+
"author": "Spearman, C. 1904.",
|
| 501 |
+
"venue": "American Journal of Psychology, 15: 88\u2013103.",
|
| 502 |
+
"url": null
|
| 503 |
+
}
|
| 504 |
+
},
|
| 505 |
+
{
|
| 506 |
+
"41": {
|
| 507 |
+
"title": "Investigating Crowdsourcing Protocols for Evaluating the Factual Consistency of Summaries.",
|
| 508 |
+
"author": "Tang, X.; Fabbri, A.; Li, H.; Mao, Z.; Adams, G.; Wang, B.; Celikyilmaz, A.; Mehdad, Y.; and Radev, D. 2022.",
|
| 509 |
+
"venue": "In Carpuat, M.; de Marneffe, M.-C.; and Meza Ruiz, I. V., eds., Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 5680\u20135692. Seattle, United States: Association for Computational Linguistics.",
|
| 510 |
+
"url": null
|
| 511 |
+
}
|
| 512 |
+
},
|
| 513 |
+
{
|
| 514 |
+
"42": {
|
| 515 |
+
"title": "Sliders, visual analogue scales, or buttons: Influence of formats and scales in mobile and desktop surveys.",
|
| 516 |
+
"author": "Toepoel, V.; and Funke, F. 2018.",
|
| 517 |
+
"venue": "Mathematical Population Studies, 25(2): 112\u2013122.",
|
| 518 |
+
"url": null
|
| 519 |
+
}
|
| 520 |
+
},
|
| 521 |
+
{
|
| 522 |
+
"43": {
|
| 523 |
+
"title": "How reproducible is best-worst scaling for human evaluation? A reproduction of \u2018Data-to-text Generation with Macro Planning\u2019.",
|
| 524 |
+
"author": "van Miltenburg, E.; Braggaar, A.; Braun, N.; Damen, D.; Goudbeek, M.; van der Lee, C.; Tomas, F.; and Krahmer, E. 2023.",
|
| 525 |
+
"venue": "In Belz, A.; Popovi\u0107, M.; Reiter, E.; Thomson, C.; and Sedoc, J., eds., Proceedings of the 3rd Workshop on Human Evaluation of NLP Systems, 75\u201388. Varna, Bulgaria: INCOMA Ltd., Shoumen, Bulgaria.",
|
| 526 |
+
"url": null
|
| 527 |
+
}
|
| 528 |
+
},
|
| 529 |
+
{
|
| 530 |
+
"44": {
|
| 531 |
+
"title": "Large scale image annotation: learning to rank with joint word-image embeddings.",
|
| 532 |
+
"author": "Weston, J.; Bengio, S.; and Usunier, N. 2010.",
|
| 533 |
+
"venue": "Machine learning, 81: 21\u201335.",
|
| 534 |
+
"url": null
|
| 535 |
+
}
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"45": {
|
| 539 |
+
"title": "Listwise approach to learning to rank: theory and algorithm.",
|
| 540 |
+
"author": "Xia, F.; Liu, T.-Y.; Wang, J.; Zhang, W.; and Li, H. 2008.",
|
| 541 |
+
"venue": "In Proceedings of the 25th international conference on Machine learning, 1192\u20131199.",
|
| 542 |
+
"url": null
|
| 543 |
+
}
|
| 544 |
+
},
|
| 545 |
+
{
|
| 546 |
+
"46": {
|
| 547 |
+
"title": "Learning to Rank when Grades Matter.",
|
| 548 |
+
"author": "Yan, L.; Qin, Z.; Shamir, G.; Lin, D.; Wang, X.; and Bendersky, M. 2023.",
|
| 549 |
+
"venue": "arXiv:2306.08650.",
|
| 550 |
+
"url": null
|
| 551 |
+
}
|
| 552 |
+
},
|
| 553 |
+
{
|
| 554 |
+
"47": {
|
| 555 |
+
"title": "American-Japanese Differences in Multiscalar Intensity Ratings of Universal Facial Expressions of Emotion.",
|
| 556 |
+
"author": "Yrizarry, N.; Matsumoto, D.; and Wilson-Cohn, C. 1998.",
|
| 557 |
+
"venue": "Motivation and Emotion, 22: 315\u2013327.",
|
| 558 |
+
"url": null
|
| 559 |
+
}
|
| 560 |
+
},
|
| 561 |
+
{
|
| 562 |
+
"48": {
|
| 563 |
+
"title": "DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation.",
|
| 564 |
+
"author": "Zhang, Y.; Sun, S.; Galley, M.; Chen, Y.-C.; Brockett, C.; Gao, X.; Gao, J.; Liu, J.; and Dolan, B. 2020.",
|
| 565 |
+
"venue": "arXiv:1911.00536.",
|
| 566 |
+
"url": null
|
| 567 |
+
}
|
| 568 |
+
}
|
| 569 |
+
],
|
| 570 |
+
"url": "http://arxiv.org/html/2408.09765v1"
|
| 571 |
+
}
|
20240819/2408.09773v1.json
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Are Large Language Models More Honest in Their Probabilistic or Verbalized Confidence?",
|
| 3 |
+
"abstract": "Large language models (LLMs) have been found to produce hallucinations when the question exceeds their internal knowledge boundaries. A reliable model should have a clear perception of its knowledge boundaries, providing correct answers within its scope and refusing to answer when it lacks knowledge. Existing research on LLMs\u2019 perception of their knowledge boundaries typically uses either the probability of the generated tokens or the verbalized confidence as the model\u2019s confidence in its response. However, these studies overlook the differences and connections between the two. In this paper, we conduct a comprehensive analysis and comparison of LLMs\u2019 probabilistic perception and verbalized perception of their factual knowledge boundaries. First, we investigate the pros and cons of these two perceptions. Then, we study how they change under questions of varying frequencies. Finally, we measure the correlation between LLMs\u2019 probabilistic confidence and verbalized confidence. Experimental results show that 1) LLMs\u2019 probabilistic perception is generally more accurate than verbalized perception but requires an in-domain validation set to adjust the confidence threshold. 2) Both perceptions perform better on less frequent questions. 3) It is challenging for LLMs to accurately express their internal confidence in natural language.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Recently, large language models (LLMs) have demonstrated remarkable performance across various NLP tasks [18 ###reference_b18###, 3 ###reference_b3###, 6 ###reference_b6###]. Despite their impressive capabilities, LLMs have several significant limitations. One critical issue is that LLMs can produce hallucinations, generating factually incorrect answers that appear accurate, primarily occurring when the question exceeds the model\u2019s internal knowledge boundaries [23 ###reference_b23###].\nA reliable system should provide correct answers when it knows the answer and refuses to answer when it does not, rather than fabricating unreliable responses, which is especially important in areas such as safety and healthcare. This requires the model to have a clear understanding of its knowledge boundaries, knowing what it knows and what it does not know.\nA model with a clear perception of its knowledge boundaries is not only more reliable but can also aid downstream tasks. For example, it can help retrieval augmentation (RA) where RA can be triggered only when the model expresses uncertainty about its answers to enhance efficiency and effectiveness, which we call adaptive retrieval augmentation. This is because retrieval augmentation incurs additional overhead and the quality of retrieved documents cannot be guaranteed, potentially misleading the model instead.\nExisting research on the model\u2019s perception of its knowledge boundaries mainly involves two types of confidence: probabilistic confidence [7 ###reference_b7###, 4 ###reference_b4###, 9 ###reference_b9###, 10 ###reference_b10###, 19 ###reference_b19###] where they use the probability of the generated tokens as the model\u2019s confidence and verbalized confidence where LLMs are taught to express their confidence in words [14 ###reference_b14###, 24 ###reference_b24###, 20 ###reference_b20###, 21 ###reference_b21###, 22 ###reference_b22###, 17 ###reference_b17###]. These represent the model\u2019s probabilistic and verbalized perceptions of its knowledge boundaries. However, these works only explore these perspectives separately, overlooking their differences and connections.\nIn this paper, we investigate LLMs\u2019 probabilistic perception and verbalized perception of their factual knowledge boundaries, analyzing the differences and correlations between them. Specifically, we try to answer three research questions. RQ1: What are the pros and cons of these two perceptions? Inspired by the previous finding that LLMs can generate more accurate answers for more common questions [15 ###reference_b15###], we also wonder RQ2: How do these two perceptions change under questions of varying frequencies? In addition to exploring the differences between these two perceptions, we also study their correlations, so the last research question is RQ3: Can LLMs accurately express their internal confidence in natural language?.\nTo answer RQ1, we choose four widely used LLMs and conduct experiments on the representative factual QA benchmark, i.e., Natural Questions (NQ) [12 ###reference_b12###]. Experimental results indicate that LLMs\u2019 probabilistic perception of their knowledge boundaries is more accurate than their verbalized perception. However, probabilistic perception necessitates the use of an in-domain dataset to determine an appropriate confidence threshold for binarizing continuous probabilistic confidence. 
In contrast, LLMs\u2019 verbalized perception performs at a reasonable level without requiring additional setup.\nTo answer RQ2, we test two powerful black-box models on the Parent and Child dataset [2 ###reference_b2###] where questions in the Child dataset are less common than those in the Parent dataset. We find that both LLMs\u2019 probabilistic perception and verbalized perception of their knowledge boundaries perform better on the Child dataset than on the Parent dataset. This indicates that LLMs\u2019 perception levels decline on more familiar questions. Additionally, for less common questions, probabilistic perception outperforms verbalized perception by a greater margin.\nTo answer RQ3, we adopt two commonly used correlation coefficient methods: the Spearman correlation coefficient [8 ###reference_b8###] and the Kendall correlation coefficient [1 ###reference_b1###]. These methods are used to calculate the correlation between probabilistic confidence and verbalized confidence for four LLMs (the same models used in RQ1) on the NQ, Parent, and Child datasets. We show that, overall, LLMs\u2019 verbalized confidence is positively correlated with their probabilistic confidence. However, at a finer granularity, the correlation is weak and varies significantly across different datasets. Therefore, we conclude that it is challenging for LLMs to accurately express their internal confidence in natural language."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Related Work",
|
| 15 |
+
"text": "Many studies have investigated deep neural models\u2019 perception of their knowledge boundaries, which can be primarily divided into two categories: probabilistic perception and verbalized perception."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Preliminaries",
|
| 21 |
+
"text": "In this section, we will introduce our task and the basic experimental setup."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Task Formulation",
|
| 27 |
+
"text": "The goal of open-domain QA is to ask the model to provide an answer for a given question . Unlike previous small-scale models [5 ###reference_b5###] that rely on the retrieve-then-read pipeline [11 ###reference_b11###, 13 ###reference_b13###, 16 ###reference_b16###], where relevant external documents are first retrieved for the question and then the model extracts the correct answer from these documents, LLMs can answer the question directly based on their internal knowledge. We instruct LLMs to answer using prompt and the format can be described as:\nInstead of only obtaining the answer, we also expect the model to express its confidence in the answer. We focus on two types of confidence: verbalized confidence and probabilistic confidence.\nVerbalized Confidence. LLMs are found to have the power to express their confidence in words which we refer to as verbalized confidence . We use to instruct the model to generate the answer along with its verbalized confidence and the format is:\nwhere indicates the model is confident in its answer while means the opposite.\nProbabilistic Confidence. Perplexity can reflect the model\u2019s internal degree of certainty in the answer which we refer to as probabilistic confidence . For an answer consisting of tokens , is computed as:\nwhere a lower implies that the model is more confident in the answer."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "Experimental Setup",
|
| 33 |
+
"text": "To facilitate the model answering the question and expressing its confidence in the answer, we use prompt =\u201cAnswer the following question based on your internal knowledge with one or few words. If you are sure the answer is accurate and correct, please say certain after the answer. If you are not confident with the answer, please say uncertain. Question: {question}. Answer:\" where {question} is the placeholder for the question .\nWe conduct experiments on two representative open-source models (Llama2-7B-Chat and Mistral-7B-Instruct-v0.2) and two widely used black-box models that can return the probability of the generated tokens, including ChatGPT (gpt-3.5-turbo-1106) and GPT-Instruct (gpt-3.5-turbo-instruct). For all the models, we set temperature=1 to obtain the raw probabilities."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Evaluating LLMs\u2019 Probabilistic and Verbalized Perceptions of Their Knowledge Boundaries",
|
| 39 |
+
"text": "In this section, we investigate the performance of LLMs\u2019 probabilistic and verbalized perceptions of their knowledge boundaries and try to answer RQ1."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4.1",
|
| 43 |
+
"parent_section_id": "4",
|
| 44 |
+
"section_name": "Exprimental Setup",
|
| 45 |
+
"text": "We conduct experiments on a widely used open-domain QA dataset, Natural Questions (NQ) [12 ###reference_b12###]. NQ is constructed using Google Search queries with annotated short or long answers related to factual knowledge. For our experiments, we use only questions with short answers from the test set and treat these short answers as labels. We randomly sample 20% of the data as the validation set, and report results on the remaining data.\nFollowing previous research [17 ###reference_b17###], we use Alignment, Overconfidence, and Conservativeness to measure LLMs\u2019 perception of their knowledge boundaries. Accuracy (acc for short) is employed to represent the QA performance, where a response is deemed correct if it contains the ground-truth label. Uncertain rate is computed as the proportion of samples where the model expresses uncertainty and is used to represent the model\u2019s uncertainty level.\nIn view of verbalized perception, is computed by the proportion of samples where LLMs\u2019 confidence matches the correctness of the response (i.e., =). is the proportion of samples where the model is confident but the response is incorrect (i.e., ), and is used to measure the proportion of samples where the model expresses uncertainty but the response is correct (i.e., ).\nUnlike verbalized confidence, probabilistic confidence is a continuous value and cannot be directly matched with binary accuracy. Therefore, we set a threshold to binarize probabilistic confidence and the format is:\nThen, similar to the metrics for verbalized perception, we compute , , and . We use the confidence threshold which achieves the optimal on the validation set as ."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.2",
|
| 49 |
+
"parent_section_id": "4",
|
| 50 |
+
"section_name": "Results and Analysis",
|
| 51 |
+
"text": "###figure_1### The results of LLMs\u2019 QA performance and probabilistic and verbalized perceptions of their knowledge boundaries are shown in Table 1 ###reference_###. We observe that:\n1) When expressing confidence in words, LLMs are not well-calibrated and tend to be overconfident which is consistent with the previous findings [17 ###reference_b17###].\n2) The probabilistic confidence is consistently much lower than the verbalized confidence, and the probabilistic alignment is significantly higher than the verbalized alignment across all models. This indicates that, compared to judging the correctness of an answer in words, LLMs have a better probabilistic perception of their knowledge boundaries. The possible reason may be that, when expressing confidence in words, LLMs do not have access to the probability distribution of the generated answer, which can be a useful signal representing the correctness of the answer.\n3) A good probabilistic perception requires an additional in-domain dataset to select an appropriate threshold. Figure 1 ###reference_### shows that the best probabilistic confidence of each model varies significantly across different datasets."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "5",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "Effects of Question Frequency",
|
| 57 |
+
"text": "LLMs often achieve better QA performance on common questions compared to unfamiliar ones [15 ###reference_b15###]. In this section, we investigate the effects of question frequency on LLMs\u2019 perception of their knowledge boundaries and answer RQ2."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "5.1",
|
| 61 |
+
"parent_section_id": "5",
|
| 62 |
+
"section_name": "Experimental Setup",
|
| 63 |
+
"text": "The Parent-Child dataset is a collection of facts about actual celebrities and their parents, presented in the form \u201cA\u2019s parent is B\" and \u201cB\u2019s child is A\" where A is the name of the celebrity and B is the name of A\u2019s parent [2 ###reference_b2###]. LLMs are often more familiar with questions asking about the names of famous people\u2019s parents because these are more likely to appear in training corpora. We collect questions in the form \"Who is A\u2019s mother/father\" and name this dataset \"Parent.\" Correspondingly, questions in the form \"Name a child of B\" are named \"Child.\" Each dataset contains 1513 question-answer pairs. The dataset splitting strategy, metrics, and other parameters are the same as those in Section \u00a74 ###reference_###.\nWe find that Llama and Mistral often refuse to answer questions containing names. So, we conduct experiments only on GPT-Instruct and ChatGPT."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "5.2",
|
| 67 |
+
"parent_section_id": "5",
|
| 68 |
+
"section_name": "Results and Analysis",
|
| 69 |
+
"text": "Table 2 ###reference_### shows the QA performance of GPT-Instruct and ChatGPT on questions of different frequencies, along with the models\u2019 perception of their knowledge boundaries. We find that:\n1) LLMs achieve better QA performance which aligns with the previous findings in [2 ###reference_b2###] and are more confident in the parent dataset compared to the child dataset.\n2) Both verbalized and probabilistic alignment are higher on the child dataset. This indicates that LLMs have a better perception of their knowledge boundaries on less common questions rather than more familiar ones.\n3) From common questions to unfamiliar ones, probabilistic alignment demonstrates a greater increase compared to verbalized alignment. The reason is that probabilistic confidence drops to a very low level on the child dataset which mitigates the level of overconfidence. It shows that these models have a very clear probabilistic understanding of what they do not know on the unfamiliar questions. At the same time, they also maintain the perception level of what they know. In view of verbalized perception, LLMs have a more accurate judgment of what they know. However, they are still overconfident which is the primary reason for the unsatisfactory perception of knowledge boundaries."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "6",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "The Correlation Between LLMs\u2019 Probabilistic Confidence and Verbalized Confidence",
|
| 75 |
+
"text": "In this section, we study the correlation between LLMs\u2019 probabilistic confidence and verbalized confidence to answer RQ3."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "6.1",
|
| 79 |
+
"parent_section_id": "6",
|
| 80 |
+
"section_name": "Experimental Setup",
|
| 81 |
+
"text": "For a probabilistic confidence list and a verbalized confidence list , we utilize two commonly used correlation coefficients: Spearman [8 ###reference_b8###] and Kendall [1 ###reference_b1###] correlation coefficients to measure their correlations.\nSpearman\u2019s rank correlation coefficient uses the rank of each value in the lists and to measure their correlation and the formula is:\nwhere and is data count. The value of ranges from [-1, 1], where a larger absolute value indicates a stronger correlation. 1 represents a perfect positive correlation, while -1 represents a perfect negative correlation.\nKendall\u2019s rank correlation coefficient is defined based on the concepts of concordant pairs and discordant pairs. A concordant pair or discordant pair refers to a pair where the relative ordering of the two variables is consistent (e.g., and ) or not (e.g., and ). The format is:\nwhere is the count of concordant pairs, is the count of discordant pairs, and is the data count. Similar to , the value of also ranges from [-1, 1], where 1 represents a perfect positive correlation, while -1 represents the verse vice.\nWe calculate the correlation coefficients in two modes. 1) Vanilla: We take LLMs\u2019 probabilistic confidence and verbalized confidence for all the data as and . 2) Bin-k. To mitigate the influence of the order of individual samples and estimate the overall trend, we sort all the data in ascending order based on the probabilistic confidence and divide them into bins with the same length. The probabilistic confidence and verbalized confidence of each bin are the average values of the data within that bin. This yields lists and , each of length . In this paper, We set to 10."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "6.2",
|
| 85 |
+
"parent_section_id": "6",
|
| 86 |
+
"section_name": "Results and Analysis",
|
| 87 |
+
"text": "The results can be seen in Table 3 ###reference_### and we visualize the changes in verbalized confidence with probabilistic confidence under the Bin-10 mode, as shown in Figure 2 ###reference_###. We conclude that:\n1) In Vanilla mode, the correlation coefficient is small, but it is much higher in Bin-10 mode (except for ChatGPT on the Parent dataset). This indicates that the correlation between the model\u2019s probabilistic confidence and verbalized confidence is relatively low although there is an overall trend showing that verbalized uncertainty increases as probabilistic uncertainty increases.\n2) The correlation varies significantly across different datasets for the same model. For instance, probabilistic confidence and verbalized confidence of GPT-instruct show a clear overall trend on the child dataset, whereas, on the parent dataset, they are almost entirely unrelated. Therefore, it is challenging for LLMs to accurately express their internal confidence in words.\n###figure_2###"
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "6.2.1",
|
| 91 |
+
"parent_section_id": "6.2",
|
| 92 |
+
"section_name": "6.2.1 Correlation Between LLMs\u2019 Confidence and Their QA Performance.",
|
| 93 |
+
"text": "To more intuitively describe LLMs\u2019 perception of their knowledge boundaries, we investigate the correlation between probabilistic confidence and QA performance, as well as between verbalized confidence and QA performance, without binarizing the probabilistic confidence using a threshold. We calculate these correlations using Spearman and Kendall coefficients in both Vanilla and Bin-10 modes. The results are shown in Table 4 ###reference_###. We observe that:\n1) Both LLMs\u2019 probabilistic confidence and verbalized confidence are positively correlated with their QA performance, with probabilistic confidence generally showing a stronger correlation with QA performance than verbalized confidence. This indicates that LLMs have a better probabilistic perception of their knowledge boundaries even without a precise threshold, which aligns with the conclusion in Section \u00a74 ###reference_###.\n2) Both correlations are very strong in the Bin-10 but relatively weak in the vanilla mode, representing that LLMs have a less nuanced but overall clear perception of their knowledge boundaries."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "7",
|
| 97 |
+
"parent_section_id": null,
|
| 98 |
+
"section_name": "Conclusion",
|
| 99 |
+
"text": "In this paper, we conduct a comprehensive analysis and comparison of LLMs\u2019 probabilistic and verbalized perceptions of their factual knowledge boundaries. Specifically, we focus on answering three research questions: RQ1: What are the pros and cons of these two perceptions; RQ2: How do these two perceptions change under questions of varying frequencies; and RQ3: Can LLMs accurately\nexpress their internal confidence in natural language. We conduct extensive experiments on four commonly used LLMs and three open-source datasets and find that 1) LLMs\u2019 probabilistic perception is generally more accurate than verbalized perception but requires an in-domain validation set to adjust the confidence threshold. 2) Both perceptions perform better on less frequent questions and probabilistic perception outperforms verbalized perception by a greater margin on these questions. 3) It is challenging for LLMs to accurately express their internal confidence in natural language."
|
| 100 |
+
}
|
| 101 |
+
],
|
| 102 |
+
"appendix": [],
|
| 103 |
+
"tables": {
|
| 104 |
+
"1": {
|
| 105 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>LLMs\u2019 probabilistic and verbalized perceptions of their knowledge boundaries of LLMs on NQ. Bold denotes the highest score for each model. Unc., Conserv., and Overconf. stand for Uncertain rate, Conservativeness, and Overconfidence, respectively.</figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S4.T1.1\" style=\"width:362.2pt;height:162pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(0.0pt,0.0pt) scale(1.0,1.0) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T1.1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.1.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1.1.1\">Model</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.1.1.1.1.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1.2.1\">Acc</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.1.1.1.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1.3.1\">Strategy</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.1.1.1.1.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1.4.1\">Unc.</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.1.1.1.1.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1.5.1\">Alignment</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.1.1.1.1.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1.6.1\">Overconf.</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.1.1.1.1.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1.7.1\">Conserv.</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.2.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.2.1.1\" rowspan=\"2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.2.1.1.1\">Llama2</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.2.1.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S4.T1.1.1.2.1.2.1\">0.2957</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.2.1.3\">Verb.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.2.1.4\">0.1894</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.2.1.5\">0.4512</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.2.1.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.2.1.6.1\">0.5319</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.2.1.7\">0.0170</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.3.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.3.2.1\">Prob.</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.3.2.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.3.2.2.1\">0.8764</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.3.2.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.3.2.3.1\">0.7254</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.3.2.4\">0.0512</td>\n<td class=\"ltx_td ltx_align_center\" 
id=\"S4.T1.1.1.3.2.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.3.2.5.1\">0.2233</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.4.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.4.3.1\" rowspan=\"2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.4.3.1.1\">Mistral</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.4.3.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S4.T1.1.1.4.3.2.1\">0.2985</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.4.3.3\">Verb.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.4.3.4\">0.4848</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.4.3.5\">0.6260</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.4.3.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.4.3.6.1\">0.2954</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.4.3.7\">0.0786</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.5.4\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.5.4.1\">Prob.</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.5.4.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.5.4.2.1\">0.9034</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.5.4.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.5.4.3.1\">0.7185</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.5.4.4\">0.0398</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.5.4.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.5.4.5.1\">0.2417</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.6.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.6.5.1\" rowspan=\"2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.6.5.1.1\">GPT-Instruct</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.6.5.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S4.T1.1.1.6.5.2.1\">0.4021</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.6.5.3\">Verb.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.6.5.4\">0.1868</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.6.5.5\">0.5182</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.6.5.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.6.5.6.1\">0.4464</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.6.5.7\">0.0354</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.7.6\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.7.6.1\">Prob.</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.7.6.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.7.6.2.1\">0.6891</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.7.6.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.7.6.3.1\">0.6551</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.7.6.4\">0.1269</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.7.6.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.7.6.5.1\">0.2180</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.8.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T1.1.1.8.7.1\" rowspan=\"2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.8.7.1.1\">ChatGPT</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T1.1.1.8.7.2\" rowspan=\"2\"><span class=\"ltx_text\" 
id=\"S4.T1.1.1.8.7.2.1\">0.4229</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.8.7.3\">Verb.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.8.7.4\">0.2111</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.8.7.5\">0.5252</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.8.7.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.8.7.6.1\">0.4204</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.8.7.7\">0.0554</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.9.8\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.1.1.9.8.1\">Prob.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.1.1.9.8.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.9.8.2.1\">0.6443</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.1.1.9.8.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.9.8.3.1\">0.6741</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.1.1.9.8.4\">0.1294</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.1.1.9.8.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.9.8.5.1\">0.1985</span></td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 106 |
+
"capture": "Table 1: LLMs\u2019 probabilistic and verbalized perceptions of their knowledge boundaries of LLMs on NQ. Bold denotes the highest score for each model. Unc., Conserv., and Overconf. stand for Uncertain rate, Conservativeness, and Overconfidence, respectively."
|
| 107 |
+
},
|
| 108 |
+
"2": {
|
| 109 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>LLMs\u2019 probabilistic and verbalized perceptions of their knowledge boundaries on Parent and Child datasets. Bold denotes the highest score for each model. Unc., Conserv., and Overconf. stand for Uncertain rate, Conservativeness, and Overconfidence respectively.</figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S5.T2.1\" style=\"width:408.0pt;height:162pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(0.0pt,0.0pt) scale(1.0,1.0) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S5.T2.1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T2.1.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.1.1.1.1\">Model</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T2.1.1.1.1.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.1.1.2.1\">Dataset</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T2.1.1.1.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.1.1.3.1\">Acc</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T2.1.1.1.1.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.1.1.4.1\">Strategy</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T2.1.1.1.1.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.1.1.5.1\">Unc.</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T2.1.1.1.1.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.1.1.6.1\">Alignment</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T2.1.1.1.1.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.1.1.7.1\">Overconf.</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T2.1.1.1.1.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.1.1.8.1\">Conserv.</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.2.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.2.1.1\" rowspan=\"4\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.2.1.1.1\">GPT-Instruct</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.2.1.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T2.1.1.2.1.2.1\">Parent</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.2.1.3\" rowspan=\"2\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.2.1.3.1\">0.5475</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.2.1.4\">Verb.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.2.1.5\">0.3642</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.2.1.6\">0.6424</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.2.1.7\">0.2230</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.2.1.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.2.1.8.1\">0.1346</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.3.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.3.2.1\">Prob.</td>\n<td class=\"ltx_td ltx_align_center\" 
id=\"S5.T2.1.1.3.2.2\">0.2114</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.3.2.3\">0.6846</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.3.2.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.3.2.4.1\">0.2783</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.3.2.5\">0.0372</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.4.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.4.3.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T2.1.1.4.3.1.1\">Child</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.4.3.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T2.1.1.4.3.2.1\">0.1540</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.4.3.3\">Verb.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.4.3.4\">0.6018</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.4.3.5\">0.7243</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.4.3.6\">0.2599</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.4.3.7\">0.0157</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.5.4\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.5.4.1\">Prob.</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.5.4.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.5.4.2.1\">0.8609</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.5.4.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.5.4.3.1\">0.8593</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.5.4.4\">0.0629</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.5.4.5\">0.0778</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.6.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S5.T2.1.1.6.5.1\" rowspan=\"4\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.6.5.1.1\">ChatGPT</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.6.5.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T2.1.1.6.5.2.1\">Parent</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.6.5.3\" rowspan=\"2\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.6.5.3.1\">0.5778</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.6.5.4\">Verb.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.6.5.5\">0.2288</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.6.5.6\">0.6198</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.6.5.7\">0.2868</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.6.5.8\">0.0934</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.7.6\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.7.6.1\">Prob.</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.7.6.2\">0.3877</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.7.6.3\">0.7670</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.7.6.4\">0.1337</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.7.6.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.7.6.5.1\">0.0992</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.8.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S5.T2.1.1.8.7.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T2.1.1.8.7.1.1\">Child</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S5.T2.1.1.8.7.2\" rowspan=\"2\"><span class=\"ltx_text\" 
id=\"S5.T2.1.1.8.7.2.1\">0.1322</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.8.7.3\">Verb.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.8.7.4\">0.5632</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.8.7.5\">0.6803</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.8.7.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.8.7.6.1\">0.3121</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.8.7.7\">0.0075</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.9.8\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S5.T2.1.1.9.8.1\">Prob.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S5.T2.1.1.9.8.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.9.8.2.1\">0.9339</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S5.T2.1.1.9.8.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.9.8.3.1\">0.8820</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S5.T2.1.1.9.8.4\">0.0259</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S5.T2.1.1.9.8.5\">0.0921</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 110 |
+
"capture": "Table 2: LLMs\u2019 probabilistic and verbalized perceptions of their knowledge boundaries on Parent and Child datasets. Bold denotes the highest score for each model. Unc., Conserv., and Overconf. stand for Uncertain rate, Conservativeness, and Overconfidence respectively."
|
| 111 |
+
},
|
| 112 |
+
"3": {
|
| 113 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T3\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Correlation coefficients between LLMs\u2019 verbalized confidence and probabilistic confidence. Bold denotes the highest score on each dataset.</figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S6.T3.1\" style=\"width:394.5pt;height:108pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(0.0pt,0.0pt) scale(1.0,1.0) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S6.T3.1.1\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T3.1.1.1.1\">\n<td class=\"ltx_td ltx_border_tt\" id=\"S6.T3.1.1.1.1.1\"></td>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_tt\" id=\"S6.T3.1.1.1.1.2\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S6.T3.1.1.1.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.1.1.3.1\">Llama</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S6.T3.1.1.1.1.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.1.1.4.1\">Mistral</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"3\" id=\"S6.T3.1.1.1.1.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.1.1.5.1\">GPT-Instruct</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"3\" id=\"S6.T3.1.1.1.1.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.1.1.6.1\">ChatGPT</span></th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.1.1.2.2\">\n<td class=\"ltx_td\" id=\"S6.T3.1.1.2.2.1\"></td>\n<th class=\"ltx_td ltx_th ltx_th_column\" id=\"S6.T3.1.1.2.2.2\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S6.T3.1.1.2.2.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.2.2.3.1\">NQ</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S6.T3.1.1.2.2.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.2.2.4.1\">NQ</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S6.T3.1.1.2.2.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.2.2.5.1\">NQ</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S6.T3.1.1.2.2.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.2.2.6.1\">Parent</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S6.T3.1.1.2.2.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.2.2.7.1\">Child</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S6.T3.1.1.2.2.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.2.2.8.1\">NQ</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S6.T3.1.1.2.2.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.2.2.9.1\">Parent</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S6.T3.1.1.2.2.10\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.2.2.10.1\">Child</span></th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.1.1.3.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.3.3.1\" rowspan=\"2\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.3.3.1.1\">Vanilla</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.3.3.2\">Spearman</td>\n<td class=\"ltx_td ltx_align_center 
ltx_border_t\" id=\"S6.T3.1.1.3.3.3\">0.24</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.3.3.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.3.3.4.1\">0.37</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.3.3.5\">0.23</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.3.3.6\">0.13</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.3.3.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.3.3.7.1\">0.38</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.3.3.8\">0.22</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.3.3.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.3.3.9.1\">0.2</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.3.3.10\">0.28</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.1.1.4.4\">\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.1.1.4.4.1\">Kendall</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.1.1.4.4.2\">0.2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.1.1.4.4.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.4.4.3.1\">0.3</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.1.1.4.4.4\">0.19</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.1.1.4.4.5\">0.1</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.1.1.4.4.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.4.4.6.1\">0.31</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.1.1.4.4.7\">0.18</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.1.1.4.4.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.4.4.8.1\">0.16</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T3.1.1.4.4.9\">0.23</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.1.1.5.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S6.T3.1.1.5.5.1\" rowspan=\"2\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.5.5.1.1\">Bin-10</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.5.5.2\">Spearman</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.5.5.3\">0.75</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.5.5.4\">0.81</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.5.5.5\">0.9</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.5.5.6\">0.12</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.5.5.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.5.5.7.1\">0.92</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.5.5.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.5.5.8.1\">0.95</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.5.5.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.5.5.9.1\">0.88</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T3.1.1.5.5.10\">0.48</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T3.1.1.6.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T3.1.1.6.6.1\">Kendall</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T3.1.1.6.6.2\">0.73</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T3.1.1.6.6.3\">0.73</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T3.1.1.6.6.4\">0.78</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T3.1.1.6.6.5\">0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T3.1.1.6.6.6\"><span 
class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.6.6.6.1\">0.85</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T3.1.1.6.6.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.6.6.7.1\">0.87</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T3.1.1.6.6.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T3.1.1.6.6.8.1\">0.78</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T3.1.1.6.6.9\">0.33</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 114 |
+
"capture": "Table 3: Correlation coefficients between LLMs\u2019 verbalized confidence and probabilistic confidence. Bold denotes the highest score on each dataset."
|
| 115 |
+
},
|
| 116 |
+
"4": {
|
| 117 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T4\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 4: </span>Correlation coefficients between LLMs\u2019 confidence and their QA performance. Bold denotes the highest score for each correlation coefficient. Prob. and Verb. stand for probabilistic confidence and verbalized confidence, respectively.</figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S6.T4.1\" style=\"width:430.5pt;height:180pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(0.0pt,0.0pt) scale(1.0,1.0) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S6.T4.1.1\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T4.1.1.1.1\">\n<td class=\"ltx_td ltx_border_tt\" id=\"S6.T4.1.1.1.1.1\"></td>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_tt\" id=\"S6.T4.1.1.1.1.2\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_tt\" id=\"S6.T4.1.1.1.1.3\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S6.T4.1.1.1.1.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.1.1.4.1\">Llama</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S6.T4.1.1.1.1.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.1.1.5.1\">Mistral</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"3\" id=\"S6.T4.1.1.1.1.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.1.1.6.1\">GPT-Instruct</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"3\" id=\"S6.T4.1.1.1.1.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.1.1.7.1\">ChatGPT</span></th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.1.1.2.2\">\n<td class=\"ltx_td\" id=\"S6.T4.1.1.2.2.1\"></td>\n<th class=\"ltx_td ltx_th ltx_th_column\" id=\"S6.T4.1.1.2.2.2\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column\" id=\"S6.T4.1.1.2.2.3\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S6.T4.1.1.2.2.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.2.2.4.1\">NQ</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S6.T4.1.1.2.2.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.2.2.5.1\">NQ</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S6.T4.1.1.2.2.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.2.2.6.1\">NQ</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S6.T4.1.1.2.2.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.2.2.7.1\">Parent</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S6.T4.1.1.2.2.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.2.2.8.1\">Child</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S6.T4.1.1.2.2.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.2.2.9.1\">NQ</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S6.T4.1.1.2.2.10\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.2.2.10.1\">Parent</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S6.T4.1.1.2.2.11\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.2.2.11.1\">Child</span></th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.1.1.3.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" 
id=\"S6.T4.1.1.3.3.1\" rowspan=\"4\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.3.3.1.1\">Vanilla</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.3.3.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S6.T4.1.1.3.3.2.1\">Spearman</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.3.3.3\">Prob.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.3.3.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.3.3.4.1\">0.31</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.3.3.5\">0.23</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.3.3.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.3.3.6.1\">0.34</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.3.3.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.3.3.7.1\">0.44</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.3.3.8\">0.41</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.3.3.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.3.3.9.1\">0.35</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.3.3.10\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.3.3.10.1\">0.58</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.3.3.11\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.3.3.11.1\">0.42</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.1.1.4.4\">\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.4.4.1\">Verb.</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.4.4.2\">0.22</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.4.4.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.4.4.3.1\">0.28</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.4.4.4\">0.22</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.4.4.5\">0.26</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.4.4.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.4.4.6.1\">0.44</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.4.4.7\">0.19</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.4.4.8\">0.18</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.4.4.9\">0.37</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.1.1.5.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.5.5.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S6.T4.1.1.5.5.1.1\">Kendall</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.5.5.2\">Prob.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.5.5.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.5.5.3.1\">0.25</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.5.5.4\">0.19</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.5.5.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.5.5.5.1\">0.28</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.5.5.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.5.5.6.1\">0.36</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.5.5.7\">0.33</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.5.5.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.5.5.8.1\">0.29</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.5.5.9\"><span class=\"ltx_text ltx_font_bold\" 
id=\"S6.T4.1.1.5.5.9.1\">0.47</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.5.5.10\">0.35</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.1.1.6.6\">\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.6.6.1\">Verb.</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.6.6.2\">0.22</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.6.6.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.6.6.3.1\">0.28</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.6.6.4\">0.22</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.6.6.5\">0.26</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.6.6.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.6.6.6.1\">0.44</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.6.6.7\">0.19</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.6.6.8\">0.18</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.6.6.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.6.6.9.1\">0.37</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.1.1.7.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S6.T4.1.1.7.7.1\" rowspan=\"4\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.7.7.1.1\">Bin-10</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.7.7.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S6.T4.1.1.7.7.2.1\">Spearman</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.7.7.3\">Prob.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.7.7.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.7.7.4.1\">0.99</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.7.7.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.7.7.5.1\">0.99</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.7.7.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.7.7.6.1\">1</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.7.7.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.7.7.7.1\">0.99</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.7.7.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.7.7.8.1\">0.98</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.7.7.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.7.7.9.1\">1</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.7.7.10\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.7.7.10.1\">0.98</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.7.7.11\">0.89</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.1.1.8.8\">\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.8.8.1\">Verb.</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.8.8.2\">0.7</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.8.8.3\">0.91</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.8.8.4\">0.7</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.8.8.5\">0.88</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.8.8.6\">0.85</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.8.8.7\">0.45</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.8.8.8\">0.81</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S6.T4.1.1.8.8.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.8.8.9.1\">0.92</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.1.1.9.9\">\n<td class=\"ltx_td ltx_align_center 
ltx_border_bb ltx_border_t\" id=\"S6.T4.1.1.9.9.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S6.T4.1.1.9.9.1.1\">Kendall</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.9.9.2\">Prob.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.9.9.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.9.9.3.1\">0.96</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.9.9.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.9.9.4.1\">0.96</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.9.9.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.9.9.5.1\">1</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.9.9.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.9.9.6.1\">0.96</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.9.9.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.9.9.7.1\">0.94</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.9.9.8\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.9.9.8.1\">1</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.9.9.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.9.9.9.1\">0.94</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S6.T4.1.1.9.9.10\">0.81</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T4.1.1.10.10\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T4.1.1.10.10.1\">Verb.</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T4.1.1.10.10.2\">0.62</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T4.1.1.10.10.3\">0.81</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T4.1.1.10.10.4\">0.62</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T4.1.1.10.10.5\">0.78</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T4.1.1.10.10.6\">0.73</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T4.1.1.10.10.7\">0.35</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T4.1.1.10.10.8\">0.71</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S6.T4.1.1.10.10.9\"><span class=\"ltx_text ltx_font_bold\" id=\"S6.T4.1.1.10.10.9.1\">0.85</span></td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 118 |
+
"capture": "Table 4: Correlation coefficients between LLMs\u2019 confidence and their QA performance. Bold denotes the highest score for each correlation coefficient. Prob. and Verb. stand for probabilistic confidence and verbalized confidence, respectively."
|
| 119 |
+
}
|
| 120 |
+
},
|
| 121 |
+
"image_paths": {
|
| 122 |
+
"1": {
|
| 123 |
+
"figure_path": "2408.09773v1_figure_1.png",
|
| 124 |
+
"caption": "Figure 1: The best threshold \u03bb\ud835\udf06\\lambdaitalic_\u03bb for GPT-Instruct and ChatGPT on each dataset.",
|
| 125 |
+
"url": "http://arxiv.org/html/2408.09773v1/extracted/5799495/figs/thre.png"
|
| 126 |
+
},
|
| 127 |
+
"2": {
|
| 128 |
+
"figure_path": "2408.09773v1_figure_2.png",
|
| 129 |
+
"caption": "Figure 2: Correlation between LLMs\u2019 probabilistic confidence and verbalized confidence. A higher uncertainty level means the model is less confident in its answer.",
|
| 130 |
+
"url": "http://arxiv.org/html/2408.09773v1/extracted/5799495/figs/ppl_conf.png"
|
| 131 |
+
}
|
| 132 |
+
},
|
| 133 |
+
"validation": true,
|
| 134 |
+
"references": [],
|
| 135 |
+
"url": "http://arxiv.org/html/2408.09773v1"
|
| 136 |
+
}
|
20240819/2408.09792v1.json
ADDED
|
@@ -0,0 +1,105 @@
| 1 |
+
{
|
| 2 |
+
"title": "Unsupervised Composable Representations for Audio",
|
| 3 |
+
"abstract": "Current generative models are able to generate high-quality artefacts but have been shown to struggle with compositional reasoning, which can be defined as the ability to generate complex structures from simpler elements. In this paper, we focus on the problem of compositional representation learning for music data, specifically targeting the fully-unsupervised setting. We propose a simple and extensible framework that leverages an explicit compositional inductive bias, defined by a flexible auto-encoding objective that can leverage any of the current state-of-art generative models. We demonstrate that our framework, used with diffusion models, naturally addresses the task of unsupervised audio source separation, showing that our model is able to perform high-quality separation. Our findings reveal that our proposal achieves comparable or superior performance with respect to other blind source separation methods and, furthermore, it even surpasses current state-of-art supervised baselines on signal-to-interference ratio metrics. Additionally, by learning an a-posteriori masking diffusion model in the space of composable representations, we achieve a system capable of seamlessly performing unsupervised source separation, unconditional generation, and variation generation. Finally, as our proposal works in the latent space of pre-trained neural audio codecs, it also provides a lower computational cost with respect to other neural baselines.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Generative models recently became one of the most important topic in machine learning research. Their goal is to learn the underlying probability distribution of a given dataset in order to accomplish a variety of downstream tasks, such as sampling or density estimation. These models, relying on deep neural networks as their core architecture, have demonstrated unprecedented capabilities in capturing intricate patterns and generating complex and realistic data [1 ###reference_b1###]. Although these systems are able to generate impressive results that go beyond the replication of training data, some doubts have recently been raised about their actual reasoning and extrapolation abilities [2 ###reference_b2###, 3 ###reference_b3###]. Notably, a critical question remains on their capacity to perform compositional reasoning. The principle of compositionality states that the meaning of a complex expression is dependent on the meanings of its individual components and the rules employed to combine them [4 ###reference_b4###, 5 ###reference_b5###]. This concept also plays a significant role in machine learning [6 ###reference_b6###], with a particular emphasis in the fields of NLP and vision. Indeed, compositionality holds a strong significance in the interpretability of machine learning algorithms [7 ###reference_b7###], ultimately providing a better understanding of the behaviour of such complex systems. In line with recent studies on compositional inductive biases [8 ###reference_b8###, 9 ###reference_b9###], taking a compositional approach would allow to build better representation learning and more effective generative models, but research on compositional learning for audio is still lacking.\nIn this work, we specifically focus on the problem of compositional representation learning for audio and propose a generic and simple framework that explicitly targets the learning of composable representations in a fully unsupervised way. Our idea is to learn a set of low-dimensional latent variables that encode semantic information which are then used by a generative model to reconstruct the input. While we build our approach upon recent diffusion models, we highlight that our framework can be implemented with any state-of-the-art generative system. Therefore, our proposal effectively combines diffusion models and auto-encoders and represents, to the best of our knowledge, one of the first contributions that explicitly target the learning of unsupervised compositional semantic representations for audio. Although being intrinsically modality-agnostic, we show that our system can be used to perform unsupervised source separation and we validate this claim by performing experiments on standard benchmarks, comparing against both unsupervised and supervised baselines. We show that our proposal outperforms all unsupervised methods, and even supervised methods on some metrics.\nMoreover, as we are able to effectively perform latent source separation, we complement our decomposition system with a prior model that performs unconditional generation and variation generation [10 ###reference_b10###]. Hence, our method is able to take an audio mixture as input, and generate several high-quality variations for one of the instrumental part only, effectively allowing to control regeneration of a source audio material in multi-instrument setups. 
Furthermore, we train a masking diffusion model in the latent space of composable representation and show that our framework is able to handle both decomposition and generation in an effective way without any supervision. We provide audio examples, additional experiments and source code on a supporting webpage111 https://github.com/ismir-24-sub/unsupervised_compositional_representations"
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Background",
|
| 15 |
+
"text": "In this section, we review the fundamental components of our methodology. Hence, we briefly introduce the principles underlying diffusion models and a recent variation rooted in autoencoders, referred to as Diffusion Autoencoder [11 ###reference_b11###], which serves as the basis for our formulation.\nNotation. Throughout this paper, we suppose a dataset of i.i.d. data points coming from an unknown distribution . We denote , and as the set of parameters learned through back-propagation [12 ###reference_b12###]."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Diffusion models",
|
| 21 |
+
"text": "Diffusion models (DMs) are a recent class of generative models that can synthesize high-quality samples by learning to reverse a stochastic process that gradually adds noise to the data. DMs have been successfully applied across diverse domains, including computer vision [13 ###reference_b13###], natural language processing [14 ###reference_b14###], audio [15 ###reference_b15###] and video generation [16 ###reference_b16###]. These applications span tasks such as unconditional and conditional generation, editing, super-resolution and inpainting, often yielding state of the art results.\nThis model family has been introduced by [17 ###reference_b17###] and has its roots in statistical physics, but there now exist many derivations with different formalisms that generalise the original formulation. At their core, DMs are composed of a forward and reverse Markov chain that respectively adds and removes Gaussian noise from data. Recently, [18 ###reference_b18###] established a connection between DM and denoising score matching [19 ###reference_b19###, 20 ###reference_b20###], introducing simplifications to the original training objective and demonstrating strong experimental results. Intuitively, the authors propose to learn a function that takes a noise-corrupted version of the input and predicts the noise used to corrupt the data. Specifically, the forward process gradually adds Gaussian noise to the data according to an increasing noise variance schedule , following the distribution\nwith and . Following the notation and , diffusion models approximate the reverse process by learning a function that predicts by\nwith usually implemented as a U-Net [21 ###reference_b21###] and the step .\nDeterministic diffusion. More recently, [22 ###reference_b22###] introduced Denoising Diffusion Implicit Models (DDIM), extending the diffusion formulation with non-Markovian modifications, thus enabling deterministic diffusion models and substantially increasing their sampling speed. They also established an equivalence between their objective function and the one from [18 ###reference_b18###], highlighting the generality of their formulation. Finally, [23 ###reference_b23###] further generalized this approach and proposed Iterative (de)Blending (IADB), simplifying the theory of DDIM while removing the constraint for the target distribution to be Gaussian. In fact, given a base distribution222For simplicity we assume , we corrupt the input data by linear interpolation with and learn a U-Net by optimizing, e.g.,\nwith . This is known as the variant of IADB, which is the closest formulation to DDIM. In our implementation, we instead use the variant of IADB, which has a slightly different formulation that we do not report for brevity. We experimented with both variants and did not find significant discrepancies in performances.\nDiffusion Autoencoders. All the methods described in the preceding paragraph specifically target unconditional generation. However, in this work we are interested in conditional generation and, more specifically, in a conditional encoder-decoder architecture. For this reason, we build upon the recent work by [11 ###reference_b11###] named Diffusion Autoencoder (DiffAE). The central concept in this approach involves employing a learnable encoder to discover high-level semantic information, while using a DM as the decoder to model the remaining stochastic variations. 
Therefore, the authors equip a DDIM model with a semantic encoder with that is responsible for compressing the high-level semantic information333In the domain of vision this could be the identity of a person or the type of objects represented in an image. into a latent variable as . The DDIM model is, therefore, conditioned on such semantic representation and trained to reconstruct the data via\nwith and being the variance at the th step. Since the DiffAE represents the state of the art for encoder-decoder models based on diffusion, we build our compositional diffusion framework upon this formulation, which we describe in the following section."
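As a concrete reference for the IADB objective above, here is a minimal PyTorch training step under stated assumptions: the network denoiser(x_alpha, alpha) is trained to predict the blending direction x1 - x0, and the base distribution is a standard Gaussian.

```python
# Minimal sketch of one IADB training step (blending-direction target).
import torch

def iadb_step(denoiser, x0, optimizer):
    x1 = torch.randn_like(x0)                              # sample from base p1
    alpha = torch.rand(x0.shape[0], *[1] * (x0.dim() - 1), device=x0.device)
    x_alpha = (1 - alpha) * x0 + alpha * x1                # linear blend of data and noise
    loss = ((denoiser(x_alpha, alpha) - (x1 - x0)) ** 2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```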
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "Proposed approach",
|
| 27 |
+
"text": "###figure_1### In compositional representation learning, we hypothesize that the information can be deconstructed into specific, identifiable parts that collectively makes up the whole input. In this work, we posit these parts to be distinct instruments in music but we highlight that this choice is uniquely dependent on the target application. Due to the lack of a widely-accepted description of compositional representations, we formulate a simple yet comprehensive definition that can subsequently be specialized to address particular cases [24 ###reference_b24###, 25 ###reference_b25###]. Specifically, we start from the assumption that observations are realizations of an underlying latent variable model and that each concept is described by a corresponding latent , where with being the total number of possible entities that compose our data. Then, we define a compositional representation of as\nwhere is a composition operator and each is a processing function that maps each latent variable to another intermediate space. By being intentionally broad, this definition does not impose any strong specific constraints a priori, such as the requirement for each subspace to be identical or the algebraic structure of the latent space itself. Hence, to implement this model, we rather need to consider careful intentional design choices and inductive biases.\nIn this work, we constrain the intermediate space to be the data space itself, i.e. for all and we focus on the learning of the latent variables and the processing functions. Finally, we set the composition operator to be a pre-defined function such as or and leave its learning to further investigations."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.1",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "Decomposition",
|
| 33 |
+
"text": "In this section, we detail our proposed model, as depicted in Figure 1 ###reference_###. Globally, we follow an encoder-decoder paradigm, where we encode the data into a set of latent representations , where for each . This is done through an encoder network that maps the input to the set of variables , i.e. . Each latent variable is then decoded separately through a parameter-shared diffusion model, which implements the processing function in Equation 5 ###reference_###, mapping the latents to the data space. Finally, we reconstruct the input data through the application of a composition operator and train the system end-to-end through a vanilla iterative (de)Blending (IADB) loss. Specifically, we learn a U-Net network and a semantic encoder via the following objective\nwith , and\nwith and .\nWe chose the IADB paradigm due to its simplicity in implementation and intuitive nature, requiring minimal hyper-parameter tuning.\nAt inference time, we reconstruct the input by progressively denoising an initial random sample coming from the prior distribution, conditioned on the components obtained through the semantic encoder.\nA note on complexity. We found that using a single diffusion model proves effective instead of training separate models for latent variables. Consequently, we opt for training a parameter-sharing neural network . Nonetheless, the computational complexity of our framework is therefore times that of a single DiffAE."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.2",
|
| 37 |
+
"parent_section_id": "3",
|
| 38 |
+
"section_name": "Recomposition",
|
| 39 |
+
"text": "One of our primary objectives is to endow models with compositional generation, a concept we define as the ability to generate novel data examples by coherently re-composing distinct parts extracted from separate origins. This definition aligns with numerous related studies that posit compositional generalization as an essential requirement to bridge the gap between human reasoning and computational learning systems [26 ###reference_b26###]. In this work, we allow for compositional generation by learning a prior model in the components\u2019 space. Specifically, once we have a well-trained decomposition model we learn a diffusion model in in order to obtain a full generative system. We define and train a IADB model to recover from a masked view . At training time, with probability , we mask each latent variable with a mask and optimize the diffusion model by solving\nwhere and . Here, and denotes the -blended source . At each training iteration we randomly mask via and train the diffusion model to recover the masked elements given the unmasked view . Our masking strategy allows for dropping each latent separately as well as all the latents simultaneously, effectively leading to a model that is able to perform both conditional and unconditional generation at the same time. In our application case, the conditional generation task reduces to the problem of generating variations. As our decomposition model proves to be effective in separating the stems of a given mixture, we obtain a system that is able to generate missing stems given the masked elements. Hence, this also addresses the accompaniment generation task. Algorithm 1 ###reference_### resumes the training process of the prior model."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Experiments and Results",
|
| 45 |
+
"text": "This section provides an overview of the experiments aimed at assessing the performance of our proposal in both decomposition (section 4.1 ###reference_###) and recomposition (section 4.2 ###reference_###) scenarios. Prior to diving into the specifics of each experiment, we provide a brief overview of the shared elements across our experiments, including data, evaluation metrics, and neural network architectures.\nData. We rely on the Slakh2100 dataset [27 ###reference_b27###], a widely recognized benchmark in source separation, comprising 2100 tracks automatically mixed with separate stems. We selected this dataset because of its large-scale nature and the availability of ground truth separated tracks. Following recent approaches in generative models [28 ###reference_b28###, 29 ###reference_b29###], we rely on a pre-trained neural codec to map the audio data to an intermediate latent space, where we apply our approach. Specifically, we employ the EnCodec model [30 ###reference_b30###], a Vector Quantized-VAE (VQ-VAE) model [31 ###reference_b31###] that incorporates Residual Vector Quantization [32 ###reference_b32###] to achieve state-of-the-art performances in neural audio encoding. We take kHz mixtures from the Slakh2100 dataset, which we then feed to the pre-trained EnCodec model to extract the continuous representation obtained by decoding the discrete codes. EnCodec maps raw audio to latent trajectories with a sampling rate of Hz. Specifically, we take audio crops of approximately , which are mapped via EnCodec to a latent code .\nEvaluation metrics. Throughout this section, we report quantitative reconstruction metrics in terms of both Mean Squared Error (MSE) and Multi-Scale Short-Time Fourier Transform (MS-STFT) [33 ###reference_b33###, 34 ###reference_b34###] for latent and audio data, respectively. We perform the MS-STFT evaluation using five STFT with window sizes following the implementation of [34 ###reference_b34###]. In order to evaluate the quality of the generated samples and the adherence to the training distribution, we also compute Fr\u00e9chet Audio Distance (FAD) [35 ###reference_b35###, 36 ###reference_b36###] scores. Specifically, we obtain the FAD scores via the fadtk library [36 ###reference_b36###], employing both the LAION-CLAP-Audio (LC-A) and LAION-CLAP-Music (LC-M) models [37 ###reference_b37###], as it was shown in [36 ###reference_b36###] that these embedding models correlate well with perceptual tests measuring subjective quality of pop music. In assessing FAD scores, we utilize the complete test set of Slakh2100, while for MSE and MS-STFT values, we randomly select 512 samples of ( hour) from the same test set and report their mean and standard deviation. Finally, in order to provide the reader a reference value, we report in Table 1 ###reference_### the reconstruction metrics for the pre-trained EnCodec.\nWhen assessing the effectiveness of source separation models, we adhere to common practice by relying on the museval Python library [38 ###reference_b38###] to compute standard separation metrics: Source-to-Interference Ratio (SIR), Source-to-Artifact Ratio (SAR), and Source-to-Distortion Ratio (SDR) [39 ###reference_b39###]. These metrics are widely accepted for evaluating source separation models, where SDR reflects sound quality, SIR indicates the presence of other sources, and SAR evaluates the presence of artifacts in a source. 
Specifically, following [39 ###reference_b39###] we compute their scale-invariant (SI) versions and, hence, provide our results in terms of SI-SDR, SI-SIR and SI-SAR. The values shown are expressed in terms of mean and standard deviation computed on samples of from the Slakh2100 test set.\n###table_1### Architectures. We use a standard U-Net [21 ###reference_b21###] with 1D convolution and an encoder-decoder architecture with skip connections. Each processing unit is a ResNet block [40 ###reference_b40###] with group normalization [41 ###reference_b41###]. Following [42 ###reference_b42###], we feed the noise level information through Positional Encoding [43 ###reference_b43###], conditioning each layer with the AdaGN mechanism. We also add multi-head self-attention [43 ###reference_b43###] in the bottleneck layers of the U-Net. The semantic encoder mirrors the U-Net encoder block without the attention mechanism and maps the data to a set of variables whose dimensionality is . Finally, these univariate latent variables condition the U-Net via a simple concatenation, which proved to be a sufficiently effective conditioning mechanism for the model to converge. We use the same U-Net architecture for both the decomposition and recomposition diffusion models."
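As a reference for the separation metrics used here, the snippet below computes SI-SDR for a single source following its standard scale-invariant definition; in practice the evaluation relies on museval, so this is only an illustrative re-implementation.

```python
import numpy as np

def si_sdr(estimate, reference, eps=1e-8):
    """Scale-invariant SDR in dB for two 1-D signals of equal length."""
    reference = reference - reference.mean()   # remove any DC offset
    estimate = estimate - estimate.mean()
    scale = np.dot(estimate, reference) / (np.dot(reference, reference) + eps)
    target = scale * reference                 # projection onto the reference
    noise = estimate - target                  # everything else is error
    return 10 * np.log10(np.dot(target, target) / (np.dot(noise, noise) + eps))
```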
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.1",
|
| 49 |
+
"parent_section_id": "4",
|
| 50 |
+
"section_name": "Decomposition",
|
| 51 |
+
"text": "In order to show the effectiveness of our decomposition method described in section 3.1 ###reference_###, we perform multiple experiments on Slakh2100. Throughout this section, we fix the number of training epochs to and use the AdamW optimizer [44 ###reference_b44###] with a fixed learning rate of as our optimization strategy. The U-Net and semantic encoder have and million trainable parameters, respectively. Finally, we use sampling steps at inference time.\nFirst, we show in Table 2 ###reference_### that our model can be used to perform unsupervised latent source separation and compare it against several non-neural baselines [45 ###reference_b45###, 46 ###reference_b46###, 47 ###reference_b47###, 48 ###reference_b48###, 49 ###reference_b49###], as well as a recent study that explicitly targets neural latent blind source separation [50 ###reference_b50###]. We also report the results obtained by Demucs [51 ###reference_b51###], which is the current top performing fully-supervised state-of-the-art method in audio source separation. As the only non-neural baseline, LASS, has been trained and evaluated on the Drums + Bass subset, we perform our analysis on this split and subsequently perform an ablation study over the other sources.\nAs we can see, our model outperforms the other baselines in terms of SI-SDR and SI-SIR and performs on par with respect to SI-SAR. Interestingly, our model outperforms the Demucs supervised baseline in terms of SI-SIR, which is usually interpreted as the amount of other sources that can be heard in a source estimate. In order to test LASS performances, we used their open source checkpoint which is trained on the Slakh2100 dataset, and followed their evaluation strategy. Unfortunately, we were not able to reproduce their results in terms of SDR but we found that their model performs well in terms of SI-SIR, which they did not measure in the original paper. Moreover, as LASS comprises training one transformer model per source, we found their inference phase to be more computationally demanding than ours. Finally, among non-neural baselines, we see that the HPSS model outperforms the others. This seems reasonable as HPSS is specifically built for separating percussive and harmonic sources and hence naturally fits this evaluation context.\nMoreover, in order to show the robustness of our approach against different sources and number of latent variables, we train multiple models on different subset of the Slakh2100 dataset, namely Drums + Bass, Piano + Bass and Drums + Bass + Piano. The interested reader can refer to our supplementary material and listen to the separation results.\nSubsequently, we show that our objective in Equation 6 ###reference_### is robust across different composition operators. We show that, for simple functions such as sum, min, max and mean our model is able to effectively converge and provide accurate reconstructions. Again, we provide this analysis by training our model on the Drums + Bass subset of Slakh2100, fixing the number of components to . We report quantitative results in terms of two reconstruction metrics, the Mean Squared Error (MSE) and Multi-Scale STFT distance (MS-STFT) in Table 3 ###reference_###. As we can see, sum and mean operators provided the best results, while min and max proved to be less effective. Nonetheless, the audio reconstruction quality measured in terms of MS-STFT provided reconstruction scores that are lower or comparable with respect to those obtained by evaluating EnCodec performances."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.2",
|
| 55 |
+
"parent_section_id": "4",
|
| 56 |
+
"section_name": "Recomposition",
|
| 57 |
+
"text": "As detailed in section 3.2 ###reference_###, once we are able to decompose our data into a set of composable representations we can then learn a prior model for generation from this new space. Since our decomposition model is able to compress meaningful information through the semantic encoder, we can learn a second latent diffusion model on this compressed representation to obtain a full generative model able to both decompose and generate data.\nHere, we validate our claims by training a masked diffusion model for the Drums + Bass split of the Slakh2100 dataset. In Table 4 ###reference_###, we show that our model can indeed produce good-quality unconditional generations by comparing it against a fully unconditional model. We measure the generation quality in terms of FAD scores computed against both the original as well as the encoded test data. Here, by original data we mean the audio coming from the test split of Slakh2100, while the encoded data represents the same elements reconstructed with our decomposition algorithm. As we train on the representations obtained through the semantic encoder, the natural benchmark for the unconditional generation is given by the reconstructions that we can obtain through our decomposition model, which represents the bottleneck in terms of quality. Nonetheless, we show that the FAD scores do not drop substantially when comparing against the original audio, showing that we can indeed achieve a good generation quality.\nIn the same table, we report the partial generation FAD scores. Instead of generating both components unconditionally, we generate the Bass (Drums) given the Drums (Bass), and measure the FAD against the original and the encoded test data, as done for the unconditional case. Given the presence of a ground-truth element, the FAD scores are lower, which is to be expected. Specifically, we can see that the drums generation is a more complex task with respect to the bass generation, as the model needs to synthesize more elements such as the kick, snare and hi-hats, matching the timing of a given bassline.\nLastly, as we strive for high-quality generations, we also aim to enhance diversity within our generations.\nTable 5 ###reference_### shows the diversity scores for partial generations obtained with our model. We measure diversity in terms of MSE and MS-STFT scores computed, respectively, in the latent and audio space. We compare our partial generations against real and random components, in order to provide the lower and upper bound for generation diversity. Specifically, given the Drums (Bass) we generate the Bass (Drums) and we compute both MSE and MS-STFT scores against the ground truth (Real) and random elements (Rand) coming from the test set of Slakh2100. From the values reported in Table 5 ###reference_###, we can deduce that our model produces meaningful variations. We invite the interested readers to listen to our results on our support website."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "5",
|
| 61 |
+
"parent_section_id": null,
|
| 62 |
+
"section_name": "Discussion and Further Works",
|
| 63 |
+
"text": "While our model proves to be effective for compositional representation learning, it still has shortcomings. Here, we briefly list the weaknesses of our proposal and highlight potential avenues for future investigations.\nFactors of convergence. In this paper, we used EnCodec which already provides some disentanglement and acts as a sort of initialization strategy for our method. We argue that this property, jointly with the low dimensionality of the latent space enforced by our encoder leads our decomposition model to converge efficiently, not requiring further inductive biases towards source separation.\nLimitations. First, there is no theoretical guarantee that the learned latent variables are bound to encode meaningful information. Exploring more refined approaches, as proposed by [52 ###reference_b52###], could be interesting in order to incorporate a more principled method for learning disentangled latent representations. Furthermore, we observed that the dimensionality of the latent space significantly influences the representation content. A larger dimensionality allows the model to encode all the information in each latent, hindering the learning of distinct factors. Conversely, a smaller dimensionality may lead to under-performance, preventing the model to correctly converge. It could be interesting to investigate strategies such as Information Bottleneck [53 ###reference_b53###] to introduce a mechanism to explicitly trade off expressivity with compression. Finally, using more complex functions as well as learnable operators is an interesting research direction for studying the interpretability of learned representations."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "6",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "Conclusions",
|
| 69 |
+
"text": "In this work, we focus on the problem of learning unsupervised compositional representations for audio. We build upon recent state-of-the-art diffusion generative models to design an encoder-decoder framework with an explicit inductive bias towards compositionality. We validate our approach on audio data, showing that our method can be used to perform latent source separation. Despite the theoretical shortcomings, we believe that our proposal can serve as a useful framework for conducting research on the topics of unsupervised compositional representation learning."
|
| 70 |
+
}
|
| 71 |
+
],
|
| 72 |
+
"appendix": [],
|
| 73 |
+
"tables": {
|
| 74 |
+
"1": {
|
| 75 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S4.T1.2\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T1.2.1.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T1.2.1.1.1\">MS-STFT</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T1.2.1.1.2\">FAD (LC-A)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T1.2.1.1.3\">FAD (LC-M)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.2.2.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T1.2.2.2.1\">4.7</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T1.2.2.2.2\">0.05</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T1.2.2.2.3\">0.04</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.3.1.1\">Table 1</span>: </span>EnCodec reconstruction quality, measured in terms of MS-STFT and FAD and computed following the procedure descried in section <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09792v1#S4\" title=\"4 Experiments and Results \u2023 Unsupervised Composable Representations for Audio\"><span class=\"ltx_text ltx_ref_tag\">4</span></a>.</figcaption>\n</figure>",
|
| 76 |
+
"capture": "Table 1: EnCodec reconstruction quality, measured in terms of MS-STFT and FAD and computed following the procedure descried in section 4."
|
| 77 |
+
},
|
| 78 |
+
"2": {
|
| 79 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T2.3\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T2.3.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_tt\" id=\"S4.T2.3.3.4\">Model</th>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T2.1.1.1\">SI-SDR ()</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T2.2.2.2\">SI-SIR ()</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T2.3.3.3\">SI-SAR ()</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.4.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T2.3.4.1.1\">rPCA <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09792v1#bib.bib45\" title=\"\">45</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.3.4.1.2\">-2.8 (4.8)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.3.4.1.3\">5.2 (7.3)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.3.4.1.4\">\n<span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.T2.3.4.1.4.1\">5.6</span> (4.6)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.5.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T2.3.5.2.1\">REPET <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09792v1#bib.bib48\" title=\"\">48</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.5.2.2\">-0.5 (4.8)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.5.2.3\">6.8 (7.0)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.5.2.4\">3.0 (5.2)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.6.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T2.3.6.3.1\">FT2D <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09792v1#bib.bib49\" title=\"\">49</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.6.3.2\">-0.2 (4.7)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.6.3.3\">5.1 (7.0)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.6.3.4\">3.1 (4.7)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.7.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T2.3.7.4.1\">NMF <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09792v1#bib.bib46\" title=\"\">46</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.7.4.2\">1.4 (5.0)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.7.4.3\">8.9 (7.6)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.7.4.4\">2.9 (4.5)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.8.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T2.3.8.5.1\">HPSS <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09792v1#bib.bib47\" title=\"\">47</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.8.5.2\">2.3 (4.8)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.8.5.3\">9.9 (7.5)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.8.5.4\">5.1 (4.6)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.9.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T2.3.9.6.1\">LASS <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09792v1#bib.bib50\" 
title=\"\">50</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.3.9.6.2\">-3.3 (10.8)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.3.9.6.3\">17.7 (11.6)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.3.9.6.4\">-1.6 (11.2)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.10.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T2.3.10.7.1\">Ours</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.10.7.2\">\n<span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.T2.3.10.7.2.1\">5.5</span> (4.6)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.10.7.3\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.3.10.7.3.1\">41.7</span> (9.3)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.10.7.4\">\n<span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.T2.3.10.7.4.1\">5.6</span> (4.6)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.11.8\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb ltx_border_t\" id=\"S4.T2.3.11.8.1\">Demucs <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09792v1#bib.bib51\" title=\"\">51</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T2.3.11.8.2\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.3.11.8.2.1\">11.9</span> (5.0)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T2.3.11.8.3\">\n<span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.T2.3.11.8.3.1\">37.6</span> (8.7)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T2.3.11.8.4\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.3.11.8.4.1\">12.0</span> (5.0)</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.9.1.1\">Table 2</span>: </span>Blind source separation results for the <span class=\"ltx_text ltx_font_italic\" id=\"S4.T2.10.2\">Drums + Bass</span> subset. Our model is trained with the <span class=\"ltx_text ltx_font_italic\" id=\"S4.T2.11.3\">mean</span> composition operator. The results are expressed in dB as the mean (standard deviation) across elements randomly sampled from the test set of Slakh2100.</figcaption>\n</figure>",
|
| 80 |
+
"capture": "Table 2: Blind source separation results for the Drums + Bass subset. Our model is trained with the mean composition operator. The results are expressed in dB as the mean (standard deviation) across elements randomly sampled from the test set of Slakh2100."
|
| 81 |
+
},
|
| 82 |
+
"3": {
|
| 83 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T3\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T3.3\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T3.3.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"S4.T3.3.3.4\">Operator</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T3.2.2.2\">MSE () \n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T3.3.3.3\">MS-STFT ()</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T3.3.4.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T3.3.4.1.1\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T3.3.4.1.1.1\">Sum</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.3.4.1.2\">1.87820 (0.13418)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.3.4.1.3\">3.6 (0.1)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.3.5.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T3.3.5.2.1\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T3.3.5.2.1.1\">Mean</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.3.5.2.2\">1.87020 (0.13183)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.3.5.2.3\">3.6 (0.1)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.3.6.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T3.3.6.3.1\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T3.3.6.3.1.1\">Min</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.3.6.3.2\">2.54182 (0.17714)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.3.6.3.3\">4.5 (0.1)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.3.7.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.T3.3.7.4.1\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T3.3.7.4.1.1\">Max</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T3.3.7.4.2\">2.43302 (0.17510)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T3.3.7.4.3\">4.3 (0.1)</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.6.1.1\">Table 3</span>: </span>Reconstruction quality in latent space (MSE) and audio (MS-STFT) of our decomposition-recomposition model for different recomposition operators for the <span class=\"ltx_text ltx_font_italic\" id=\"S4.T3.7.2\">Drums + Bass</span> subset.</figcaption>\n</figure>",
|
| 84 |
+
"capture": "Table 3: Reconstruction quality in latent space (MSE) and audio (MS-STFT) of our decomposition-recomposition model for different recomposition operators for the Drums + Bass subset."
|
| 85 |
+
},
|
| 86 |
+
"4": {
|
| 87 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T4\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T4.5\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T4.5.6.1\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_tt\" id=\"S4.T4.5.6.1.1\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" colspan=\"2\" id=\"S4.T4.5.6.1.2\">Original</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" colspan=\"2\" id=\"S4.T4.5.6.1.3\">Encoded</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.4.4\">\n<th class=\"ltx_td ltx_th ltx_th_row\" id=\"S4.T4.4.4.5\"></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.1.1.1\">FAD (LC-A) ()</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T4.2.2.2\">FAD (LC-M) ()</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.3.3.3\">FAD (LC-A) ()</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.4.4.4\">FAD (LC-M) ()</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.5.7.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T4.5.7.2.1\">Unconditional</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.5.7.2.2\">0.09</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.5.7.2.3\">0.09</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.5.7.2.4\">0.06</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.5.7.2.5\">0.06</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.5.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T4.5.5.1\"></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.5.5.2\">0.12</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T4.5.5.3\">0.11</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.5.5.4\">0.08</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.5.5.5\">0.07</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.5.8.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T4.5.8.3.1\">Bass</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.5.8.3.2\">0.03</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T4.5.8.3.3\">0.03</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.5.8.3.4\">0.01</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.5.8.3.5\">0.01</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.5.9.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.T4.5.9.4.1\">Drums</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T4.5.9.4.2\">0.09</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S4.T4.5.9.4.3\">0.08</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T4.5.9.4.4\">0.05</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T4.5.9.4.5\">0.05</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.9.1.1\">Table 4</span>: </span>Audio quality of unconditional generations by our generative model. We demonstrate that we can jointly learn an unconditional and conditional model by showing that the FAD scores of are comparable to those of an unconditional latent diffusion model.</figcaption>\n</figure>",
|
| 88 |
+
"capture": "Table 4: Audio quality of unconditional generations by our generative model. We demonstrate that we can jointly learn an unconditional and conditional model by showing that the FAD scores of are comparable to those of an unconditional latent diffusion model."
|
| 89 |
+
},
|
| 90 |
+
"5": {
|
| 91 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T5\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T5.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T5.1.1\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_tt\" id=\"S4.T5.1.1.2\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"S4.T5.1.1.3\">Type</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T5.1.1.1\">MSE \n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T5.1.1.4\">MS-STFT</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T5.1.2.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_t\" id=\"S4.T5.1.2.1.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S4.T5.1.2.1.1.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T5.1.2.1.1.1.1\">\n<span class=\"ltx_tr\" id=\"S4.T5.1.2.1.1.1.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T5.1.2.1.1.1.1.1.1\">\n<span class=\"ltx_inline-block ltx_transformed_outer\" id=\"S4.T5.1.2.1.1.1.1.1.1.1\" style=\"width:6.9pt;height:19.6pt;vertical-align:-6.3pt;\"><span class=\"ltx_transformed_inner\" style=\"width:19.6pt;transform:translate(-6.32pt,0pt) rotate(-90deg) ;\">\n<span class=\"ltx_p\" id=\"S4.T5.1.2.1.1.1.1.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.1.2.1.1.1.1.1.1.1.1.1\">Real</span></span>\n</span></span></span></span>\n</span></span></th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T5.1.2.1.2\">Drums</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.1.2.1.3\">2.3259 (0.1287)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.1.2.1.4\">13.6 (0.4)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.3.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T5.1.3.2.1\">Bass</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.3.2.2\">1.4393 (0.0874)</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.1.3.2.3\">9.38 (0.2)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.4.3\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_t\" id=\"S4.T5.1.4.3.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S4.T5.1.4.3.1.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T5.1.4.3.1.1.1\">\n<span class=\"ltx_tr\" id=\"S4.T5.1.4.3.1.1.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T5.1.4.3.1.1.1.1.1\">\n<span class=\"ltx_inline-block ltx_transformed_outer\" id=\"S4.T5.1.4.3.1.1.1.1.1.1\" style=\"width:6.9pt;height:23.5pt;vertical-align:-8.3pt;\"><span class=\"ltx_transformed_inner\" style=\"width:23.5pt;transform:translate(-8.26pt,0pt) rotate(-90deg) ;\">\n<span class=\"ltx_p\" id=\"S4.T5.1.4.3.1.1.1.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.1.4.3.1.1.1.1.1.1.1.1\">Rand</span></span>\n</span></span></span></span>\n</span></span></th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T5.1.4.3.2\">Drums</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.1.4.3.3\">4.8170 (0.1136)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.1.4.3.4\">20.5 (0.6)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.1.5.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.T5.1.5.4.1\">Bass</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T5.1.5.4.2\">4.8814 (0.1157)</td>\n<td class=\"ltx_td ltx_align_center 
ltx_border_bb\" id=\"S4.T5.1.5.4.3\">21.7 (0.7)</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.3.1.1\">Table 5</span>: </span>Diversity of variations generated by our prior model, measured via the MSE and MS-STFT distances against ground truth and random components.</figcaption>\n</figure>",
|
| 92 |
+
"capture": "Table 5: Diversity of variations generated by our prior model, measured via the MSE and MS-STFT distances against ground truth and random components."
|
| 93 |
+
}
|
| 94 |
+
},
|
| 95 |
+
"image_paths": {
|
| 96 |
+
"1": {
|
| 97 |
+
"figure_path": "2408.09792v1_figure_1.png",
|
| 98 |
+
"caption": "Figure 1: The overall architecture of our decomposition model. We first mix the sources, map the data \ud835\udc31\ud835\udc31\\mathbf{x}bold_x to the latent space through a frozen, pre-trained EnCodec model, and then decompose it into a set of latent variables (two shown here). These variables then condition a parameter-sharing diffusion model whose generation are then recomposed by an operator C\ud835\udc36Citalic_C.",
|
| 99 |
+
"url": "http://arxiv.org/html/2408.09792v1/extracted/5799587/figs/modmod.png"
|
| 100 |
+
}
|
| 101 |
+
},
|
| 102 |
+
"validation": true,
|
| 103 |
+
"references": [],
|
| 104 |
+
"url": "http://arxiv.org/html/2408.09792v1"
|
| 105 |
+
}
|
20240819/2408.09819v1.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20240819/2408.09842v1.json
ADDED
|
@@ -0,0 +1,293 @@
| 1 |
+
{
|
| 2 |
+
"title": "Can we measure the impact of a database?",
|
| 3 |
+
"abstract": "In disseminating scientific and statistical data, on-line databases have almost completely replaced traditional paper-based media such as journals and reference works.\nGiven this, can we measure the impact of a database in the same way that we measure an author\u2019s or journal\u2019s impact?\nTo do this, we need somehow to represent a database as a set of publications, and databases typically allow a large number of possible decompositions into parts, any of which could be treated as a publication.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "1. Introduction",
|
| 9 |
+
"text": "It is almost self-evident that databases exist to publish data, and this is undoubtedly the case for scientific and statistical databases, which have largely replaced traditional reference works.\nDatabase and web technology has led to an explosion in the number of databases\nthat support scientific research for obvious reasons: databases provide faster communication of knowledge, they hold larger volumes of data, they are more easily searched, and they are both human and machine-readable.\nMoreover, they can be developed rapidly and collaboratively by a mixture of researchers and curators.\nFor example, more than 1500 curated databases are relevant to molecular biology alone (Imker, 2018 ###reference_b13###).\nThe value of these databases lies not only in the data they present but also in how they organize that data.\nIf we want to measure the impact of a database, can we use its organization to treat it in the same way that we treat any other publishing agent, such as a journal or an author?\nIn the case of an author or journal, most bibliometric measures are obtained from the citations to the associated set of publications.\nFor a database, there are typically many ways of decomposing it into publications, so we might use its organization to guide in the choice of decompositions.\nWe shall show that, when the database has a hierarchical structure, there is a natural extension of the h-index that works on hierarchies.\nAlthough the main results presented in this paper are the evaluations of the h-indexes of some well-known databases, this was not the original motivation.\nOne of the authors was involved in a project (Buneman et al., 2020 ###reference_b7###) to automatically generate a set of conventional papers to give credit \u2013 as authors \u2013 to the 1000 or so researchers who had contributed to a database (Harding et al., 2018 ###reference_b11###).\nBy creating one publication for the whole database \u2013 as might happen with data papers which periodically publish data summaries and are citation proxies for databases (Candela et al., 2015 ###reference_b9###), we generate a document with an unhelpfully huge number of authors. 
Also, these authors receive only one additional publication credit, regardless of whether they contributed to numerous sections of the database or just a single part.\nOn the other hand, generating thousands of documents, one for each \u201cobject\u201d in the database, is almost equally useless.\nWhile the authors are now associated with their areas of expertise, they are unwittingly guilty of having minimal publishing units.\nThe tension between those two extremes underlies the rationale for the h-index (Hirsch, 2005 ###reference_b12###), so the obvious question is: can we extend the h-index to work on databases and \u2013 given that there is a hierarchical structure \u2013 is there a natural extension of the h-index to hierarchies?\nWe then have at least one non-trivial measure of the importance of a database.\nHowever, even if this measure is not of interest, the decomposition that produced it may well be of interest, perhaps as a starting point to the curators, who want to find a useful set of publications to associate with the database for the benefit of the contributors or curators who would like to have their work properly cited.\nHierarchies are used frequently in curated databases.\nAs in the three examples we use in this paper (Harding et al., 2018 ###reference_b11###; Wishart et al., 2018 ###reference_b20###; Schoch et al., 2020 ###reference_b17###), they are based on some kind of hierarchical classification scheme, taxonomy or ontology.\nMoreover, data sets based on a file system or data format such as JSON or XML have an intrinsic hierarchical structure (we shall refer to all of these as databases).\nWe also note that, when two very similar papers are published (e.g., a preprint and its final peer-reviewed version), citation analyzers such as Google Scholar have an option to transfer citations from one paper to the other (the parent).\nGiven that a paper can have at most one parent and that the parent relationship is acyclic, there is at least a partial hierarchy (a set of tree-like structures) already present in the data structure maintained by the software.\nGiven a hierarchy, how do we use it to limit the possible decompositions into sets of publications?\nConsider the hierarchy in the DrugBank database shown in Figure 1 ###reference_###.\nIn this database, citations are only to the terminal nodes or leaves of the tree.\nIf we want a citation count for some higher-level node, we would use the sum of the citation counts of the leaves below that node, much as we would take the citation count for an issue of a journal to be the sum of the citation counts of the papers in that issue.\nNow, we propose to use a subset of these nodes as a possible decomposition; however we cannot use an arbitrary subset, because it allows double-counting\nof citations if we allow a node and one of its ancestors to appear in the same decomposition.\nThus we restrict our attention to antichains of nodes: an antichain is a set of nodes in which no node is an ancestor of another node in that set.\nLooking at the Linnean-style stratification in Figure 1 ###reference_###, the kingdom, superclass and class nodes each constitute antichains, as do the drug nodes (the subclass nodes can have other subclass nodes as ancestors and do not).\nAlso, the root node represents the database as a whole, and the leaf nodes represent the individual drugs, and each of these two sets also constitutes an antichain.\nNow consider the h-index, which is used almost universally to measure an author\u2019s output and often the importance of a journal; it is one of the few metrics that measure both the productivity and the citation impact of authors (Teixeira da Silva and Dobr\u00e1nszki, 2018 ###reference_b18###).\nIt is defined as follows:\nGiven a set $P$ of publications, its h-index is the largest number $h$ for which there is a subset of $P$ of size $h$ in which each publication has at least $h$ citations.\nThe extension to hierarchies is immediate:\nGiven a hierarchy $H$ of publications, its h-index is the largest number $h$ for which there is an antichain in $H$ of size $h$ in which each publication has at least $h$ citations.\nWe will formalize this in the following section, but we should make some important observations immediately.\nFirst, when the hierarchy is \u201cflat\u201d (there is no ordering), the two definitions coincide.\nSecond, although a set has an exponential number of subsets, its h-index can be efficiently computed by sorting the set.\nSimilarly, a hierarchy can have an exponentially large number of antichains, and we will demonstrate that, by using the appropriate data structures, the h-index of a hierarchy can be evaluated with the same efficiency.\nThis is crucial if we are going to apply it to large databases.\nThird, using a hierarchy to constrain the possible decompositions is essential.\nFor example,\nbased on a reduction from the Partition problem, one can show that the more general problem of finding the maximal h-index under an arbitrary partition is strongly NP-hard (de Keijzer and Apt, 2013 ###reference_b10###), and there are algorithms and complexity research on improving and maximizing the h-index and related citation indices under different merging rules (de Keijzer and Apt, 2013 ###reference_b10###; Van Bevern et al., 2016 ###reference_b19###; Pavlou and Elkind, 2016 ###reference_b15###).\nFinally, much of this research can be seen as an attempt to \u201cgame\u201d the h-index \u2014 finding some merging strategy that enhances one\u2019s h-index.\nIf, for a hierarchical organization, we equivalently define the h-index of a hierarchy as the maximal h-index of any antichain within the hierarchy, then it might seem as though we are also engaged in gaming.\nHowever, this is not the case; we are merely employing what appears to be the most natural extension of the h-index definition to hierarchies.\n###figure_1### In the following sections, we first formalize the problem and develop some simple results on antichains that\nallow the maximal h-index to be computed efficiently.\nFollowing this, we give three examples of databases from which we can extract an h-index and look at some of the details of their hierarchical organization.\nWe conclude with some speculation on how these results might be further developed.\nThere are two major caveats to what we have to offer.\nThe first is that we are not going to comment on the justification or fairness of the h-index and its variants (Bornmann and Daniel, 2007 ###reference_b5###; Yong et al., 2014 ###reference_b22###);\nwe only observe that it is almost universally adopted in measuring the impact of both authors and journals and \u2013 as we have noted above \u2013 it may be useful in helping to decompose a database into citable units.\nSecond, the results we present here are preliminary.\nAs we have already remarked, the practice of data citation is still in its infancy; people still fail to cite databases or cite them improperly.\nFor comparison, we have included the results of using URLs as citations.\nHence, the results we give should in no way be taken as an accurate measure of the
importance of the databases we examine.\nThey demonstrate appropriate techniques and the feasibility of measuring the h-index at scale."
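Since the flat case anchors the hierarchical definition, here is a small reference implementation of the classical h-index over a set of citation counts; the hierarchical version reduces to this when no node is an ancestor of another.

```python
def h_index(citation_counts):
    """Largest h such that h publications have at least h citations each."""
    counts = sorted(citation_counts, reverse=True)
    h = 0
    while h < len(counts) and counts[h] >= h + 1:
        h += 1
    return h

assert h_index([10, 8, 5, 4, 3]) == 4   # four papers with >= 4 citations each
```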
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "2. The h-index of a hierarchy",
|
| 15 |
+
"text": ""
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "2.1. Hierarchies and antichains",
|
| 21 |
+
"text": "###figure_2### Our starting point is that we can model a database as a hierarchy together with a citation count for each node in the hierarchy.\nOur goal is to select a subset of nodes, aggregate the citations associated with those, and thus compute the h-index of such set of nodes.\nIn choosing a subset of nodes, we cannot double-count citations.\nFor example, in Figure 1 ###reference_### if we decide \u2013 to compute an h-index \u2013 that the Lactones node (node g at the \u201cclasses\u201d level) is to be a publication, the citations to that node include all the citations to drugs beneath that node, so we cannot use a citation to one of those drugs as a citation to some other node.\nThis means that we limit our candidate sets to those in which no node is an ancestor of another, or antichains.\nTo illustrate this, Figure 2 ###reference_### shows a hierarchy in which the root constitutes an antichain, as does and also the leaves.\nIn this diagram, the level of a node is determined by its citation count or rank.\nEven though the number of antichains in a hierarchy can be exponentially large, to compute the h-index, we can use the rank to cut down the number of antichains we need to examine.\nFor this, we need a small amount of formal development.\nA hierarchy is a partial order \nsuch that for all , and implies that or .\nThis definition of hierarchy ensures that a node can have at most one parent. We note that is the parent of a node if it is the smallest node , distinct from , such that .\nA set is called antichain if, for any distinct pair , neither nor .\nA ranked hierarchy is a hierarchy \ntogether with a rank (or level) function , such that for all , if then .\nGiven a subset of , we will use the term minimal elements of to refer to those nodes for which there is no , distinct from , such that . That is, the minimal elements of are the lowest nodes in the hierarchy belonging to .\nGiven a ranked hierarchy , an -antichain in H is an antichain such that for all :\n,\nif , then .\nIn other words, an -antichain is a set in which each node has rank at least and all its children have rank less than . In Figure 2 ###reference_### the set is a -antichain and the leaves form a -antichain. Note that a leaf (a node with no children) with rank at least could be a member of a -antichain.\nWe shall use the term rank-minimal to describe the elements of a set that are minimal with respect to the rank function , i.e., those elements for which is minimum for .\nThe following result guarantees that, given an antichain with minimal rank , it is possible to find an -antichain which has no fewer elements.\nLet be an antichain in a ranked hierarchy with a rank-minimal node of rank .\nThen, there exists an -antichain such that .\nThe proof is straightforward and given in the additional material (A.1 ###reference_theorem1###); one constructs the antichain by a process that is similar to the top-down algorithm, which we describe shortly.\nFrom this, we can obtain the main result. The h-index of an antichain in a ranked hierarchy is defined as .\nFor any antichain in a ranked hierarchy, there is an -antichain such that .\nLet be the h-index of . Then there is a subset for which for all . A rank-minimal element of S will have rank and Prop. 2.6 ###reference_theorem6### gives us an -antichain with at least as many elements as , i.e., has an h-index no less than .\u220e\nThe importance of this result is that we only have to search maximal -antichains to find the h-index of any antichain in . 
The number of such antichains is not greater than the number of nodes, which guarantees us at least a polynomial time algorithm. In fact, by pursuing a top-down approach, we can obtain a very efficient algorithm which, once the hierarchy is constructed, will, in practice, only visit a subset of the nodes in the hierarchy."
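The antichain condition itself is cheap to check directly; the sketch below does so under the assumption that each node carries a parent pointer (None at the root), an illustrative representation rather than the paper's data structure.

```python
def ancestors(node):
    """Yield the proper ancestors of a node via its parent pointers."""
    while node.parent is not None:
        node = node.parent
        yield node

def is_antichain(nodes):
    """True iff no node in `nodes` is an ancestor of another one."""
    chosen = set(nodes)
    return not any(a in chosen for n in nodes for a in ancestors(n))
```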
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "2.2. A top-down algorithm",
|
| 27 |
+
"text": "Consider the -antichains in Figure 2 ###reference_###. At the root, we have a -antichain of length 1 and below it a -antichain of length 1. Proceeding downwards, the first longer antichain is the -antichain of length 2. Further down we find the -antichain of length 5. This is the -antichain of greatest h-index: it has 5 nodes, all with rank greater or equal to 5. As we proceed downwards, the level is strictly decreasing while the length of the antichains is non-decreasing. When the latter overtakes the former, we have just \u201dovershot\u201d the antichain that yields the maximal h-index, and we can stop. Note that we have stopped before examining all the nodes in the hierarchy.\nIn the following we use the notation for the minimal elements of a subset , i.e., an antichain.\n is defined similarly.\nAn -antichain can thus be defined as .\nIn order to compute successively lower antichains, the algorithm maintains, for decreasing values of , the following two antichains:\n, i.e., the -antichain of maximum cardinality among all -antichains;\n, i.e., the antichain of nodes that are maximal (with respect to ) in the set of nodes with rank less than .\nThese two antichains have the following properties:\nthe children of any element in are contained in ;\nthe next lowest rank of any node in the hierarchy is possessed by at least one node in ;\nAlso, we observe that: i) a node is in iff and for all children of , ; ii) a node is in iff and the parent of is such that .\nAlgorithm 1 ###reference_### details the procedure used to find the antichain that guarantees the maximum h-index. On lines 3 and 13, the nodes with the highest rank (i.e., citation count) are found in the . These nodes are removed from the DCHAIN (line 7) and added to the (line 8). The children of these nodes are then added to (lines 9 and 10).\nThen, in lines 11 and 12, all the non-minimal nodes in are removed, making it a proper -antichain.\nThe process continues until the cardinality of is bigger than the highest rank in . At this point, each node in has a rank . Thus even if it were to be moved from to there would not be an increase in the value of the h-index for . At this point, the algorithm can stop."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.3",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "2.3. Algorithm discussion",
|
| 33 |
+
"text": "Referring to Algorithm 1 ###reference_###, the most obvious thing to do is to use heaps to implement and .\nThe heap values for each node DCHAIN should be the node rank, while the values for those in LCHAIN should be the maximal rank of the children of the node, which can be computed at line 9.\nWith these representations, the \u201csearch\u201d operations on lines 3, 6, 11, and 13 all incur unit cost, while the insert or delete\noperations on lines 7, 8, 10, and 12 are all in the length of the antichain. This gives a bound of in the hierarchy size for the whole algorithm.\nA further observation is that at line 9, we need only consider child nodes for inclusion if their rank is greater\nthan the current length of LCHAIN; if this is not the case, they cannot participate in the final antichain, and they\ntogether with their descendants in the hierarchy may be safely ignored. In describing the results for the various databases in Section 3 ###reference_### we have included two figures: visited (vis.) \u2013 the number of nodes whose rank was interrogated and digested (dig.) \u2013 the number of nodes that entered DCHAIN.\nThe algorithm\u2019s running time will be bounded by a constant in vis. and a constant times in dig.\nIn the measurements for a whole hierarchy (not the truncated ones where we cut off the hierarchy at a certain level), the digested nodes accounted for less than of the total.\nAnother observation is that the algorithm requires that the set of nodes as input is a hierarchy, and thus that this hierarchy is built before this algorithm is run.\nFinally, we note that in the degenerate case of a hierarchy made of only leaves, the algorithm requires time to create the LCHAIN heap."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "3. Experimental Analysis",
|
| 39 |
+
"text": "Using this algorithm, we compute and analyze the computation of the h-index of three widely-used databases: Drugbank which we describe in Section 1 ###reference_###; the IUPHAR/BPS Guide to Pharmacology (Harding et al., 2018 ###reference_b11###) (GtoPdb), which is an open-access database on the biological targets of drugs and other molecules; and, the taxonomic database curated by the National Center for Biotechnology Information (NCBI), which maintains one of the most comprehensive and best-organized collections of medical data (McEntyre and Ostell, 2002 ###reference_b14###).\nThe online additional material details all the information about these databases and how we collected the corresponding citation counts."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.1",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "3.1. Drugbank",
|
| 45 |
+
"text": "###table_1### The results of the analysis on Drugbank are shown in Table 1 ###reference_###, where median and maximum refer to the citation counts of the antichain that yields the given h-index (the median is sometimes given in reporting the h-index of a journal).\nThe visited (vis.) and digested (dig.) columns refer to the discussion about Algorithm 1.\nOnly a few nodes are leaves in the antichain for the entire hierarchy. The Leaves/drugs row shows the h-index for the \u201cflat\u201d hierarchy of the drugs themselves, that is, the h-index obtained considering only the drugs as a set of \u201cpublications\u201d that receive citations, without considering the hierarchy of the database.\nThe Class and Subclass rows show the results for the hierarchies obtained by removing all nodes below those strata. The table clearly shows the advantage to computing the h-index on a full hierarchy and second that constraining the antichains to a given stratum reduces the h-index. We also see that the algorithm calculates the h-index visiting approximately one-third of the nodes in the entire hierarchy."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.2",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "3.2. GtoPdb",
|
| 51 |
+
"text": "The IUPHAR/BPS Guide to Pharmacology (GtoPdb) is an open-access relational database with an accompanying website on the biological targets of drugs and other molecules. As shown in Figure 3 ###reference_###, GtoPdb has a hierarchical structure. It combines the expertise of approximately 1,000 researchers worldwide and was an early stimulus for data citation (Buneman and Silvello, 2010 ###reference_b8###) to give credit to the contributors. Here, however, we are using it to assess credit to the database as a whole.\n###figure_3### A critical difference between Drugbank and GtoPdb is that in GtoPdb, there are citations to the intermediate nodes in the hierarchy. In fact the data for these \u201dfamily\u201d nodes often resembles short conventional papers with descriptive material and statistics on the targets (leaf nodes) included in the family. This raises the possibility that we might want to treat a family node as independent of the nodes it describes, and we introduce a lifting transformation that allows us to do this.\nTable 2 ###reference_### presents the results for the GtoPdb database divided into the horizontal sections \u201cCitations\u201d\nand \u201cLinks\u201d. The first section corresponds to results obtained with citations extracted from the citing papers, while the second part is obtained by accounting for web links as citations.\nAs with Drugbank, the first two rows of each section in the table show the analysis\non the full hierarchy and on the \u201cflat\u201d collection of nodes, which\nnow contains a substantial number of cited intermediate (family)\nnodes together with the leaf target nodes. The \u201cFamilies only\u201d shows that little is lost by removing\nthe leaves from the hierarchy, thus suggesting that many citations are also given to the intermediate nodes in the hierarchy.\nThe meaning of the \u201cLifted hierarchy\u201d row is discussed below.\nIt is also interesting to note that families of nearly all heights \u2013 where\nby height, we mean the maximum number of edges to a leaf node \u2013 figure\nin the antichain that provides the h-index for the full hierarchy.\nThe figures are: 5 at height 3, 9 at height 2, 4\nto leaves, and the remaining 37 to bottom level families at height 1.\n###figure_4### Lifting.\nFor reasons mentioned in the additional material, the Links section of Table 2 ###reference_###, is based on highly skewed citation counts, and the results are almost meaningless. We have included it in this section because it shows an interesting phenomenon.\nWe see \u2013 perhaps surprisingly \u2013 that computing the h-index on the full hierarchy gives a lower figure than that for the set of nodes with no structure (167 vs 172).\nIt appears that creating a hierarchy on a set of publications, in this case the GtoPdb web pages, and summing the citation counts\nas we have proposed, does not necessarily increase the h-index with respect to the simple set of all the publications.\nConsider now the hierarchy in Figure 4 ###reference_###.a. 
Each node is annotated on the left with its number of \u201cdirect\u201d citations and on the right with the sum of the number of citations to that node and to all of its descendants.\nIt is easy to see that, for this first hierarchy, the longest antichain is the set , which has an\nh-index of 2.\nHowever, had we considered the set of nodes without a hierarchy and with a number of citations as the one obtained by direct citations to them ( respectively), its h-index would be 4.\nTherefore, in this example, the use of the hierarchy does not bring the highest h-index that could be obtained with the available publications.\nOn the other hand, had Figure 4 ###reference_###.a been part of a larger hierarchy, it might have been advantageous to have a node such as with a relatively high rank.\nThe implication is that, in a bigger hierarchy, new and longer antichains could have been found, where could contribute to the generation of a greater h-index.\nThe problem is that in the computation of the h-index, as described in Section 2 ###reference_###, the inclusion of a node in an antichain precludes the inclusion of any ancestor or descendant node in the same antichain.\nThus, a descendant and ancestor node cannot be considered independently in the computation of the h-index.\nIn a case such as GtoPdb, however, the information associated with a family may complement the information\nassociated with an object (child) in that same family. Therefore, It is reasonable to ask that both parent and child could be counted in the computation of the h-index.\nTo deal with this problem, we introduce a transformation called lifting: for each internal node \nin the hierarchy, a surrogate node is created.\n becomes the parent of along with all the children of , and becomes a leaf.\n\u2019s rank is now determined only by its direct citations, and the new node enters with a number of direct citations equal to and a rank equal to the sum of its children\u2019s citations.\nFigure 4 ###reference_###.b shows the result of applying this transformation to Figure 4 ###reference_###.a.\nNote how the set of all non-surrogate nodes now can be an antichain at rank 4.\nAfter applying this transformation in the GtoPdb hierarchy, the results shown in Table 2 ###reference_###(\u201cLifted Hierarchy\u201d row) show an h-index of 55 with data citations, and 179 with links.\nWe mentioned in the introduction that, for conventional papers, when two papers are similar, one may designate one of the papers as the parent, which will then receive all the citations for the child.\nWhether it is advantageous to combine the two depends on the authors.\nThe advisor with a high existing h-index might want to do this, but the student with few publications might not.\nA lifted representation of the publication hierarchy and computing the h-index of an author on that hierarchy would keep both advisor and student happy!"
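A small Python sketch may make the lifting transformation concrete. The dict-based representation, the surrogate naming, and the assumption that a surrogate starts with zero direct citations are illustrative choices, not the paper's implementation.

```python
def lift(kids, direct):
    """Lifting: every internal node v gets a surrogate v* that adopts v
    (now a leaf) together with v's children; v* carries the aggregated
    rank, while v keeps only its direct citations."""
    surrogate = {v: v + "*" for v, cs in kids.items() if cs}  # internal nodes only
    lifted_kids, lifted_direct = {}, dict(direct)
    for v, cs in kids.items():
        if not cs:
            continue                                 # leaves are unchanged
        # v* adopts v plus v's children, each child replaced by its own
        # surrogate if it was internal; if the root is internal, its
        # surrogate becomes the new root
        lifted_kids[surrogate[v]] = [v] + [surrogate.get(c, c) for c in cs]
        lifted_direct[surrogate[v]] = 0              # assumed: no direct citations
    return lifted_kids, lifted_direct

# Aggregated ranks are then recomputed bottom-up: the rank of v* equals the
# old aggregated rank of v, while v itself now ranks by direct citations only.
```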
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.3",
|
| 55 |
+
"parent_section_id": "3",
|
| 56 |
+
"section_name": "3.3. NCBI Taxonomy",
|
| 57 |
+
"text": "Among the databases maintained by NCBI is the taxonomy database (Schoch et al., 2020 ###reference_b17###), which contains \u201ca list of names that are determined to be nomenclatural correct or valid (as defined according to the different codes of nomenclature), classified in an approximately phylogenetic hierarchy\u201d111https://www.ncbi.nlm.nih.gov/books/NBK53758/ ###reference_8/###.\nThe hierarchy contains some 2.3 million nodes or taxons, and for each node, the web interface displays incoming links from other NCBI databases and Pubmed, which can be considered as citations from conventional literature. The structure of NCBI is similar to that of IUPHAR/BPS as all nodes in the hierarchy are citable.\nThere are approximately 3.2 billion incoming links for all nodes, of which 7.8 million are from Pubmed.\nFor reasons described in the additional material, we confined our attention to the vertebrate and the animal sub-hierarchies. Vertebrates are a subset of animals, and animals account for approximately half the nodes in the complete hierarchy.\nThe results on the NCBI taxonomy hierarchy are shown in Table 3 ###reference_###, where they are divided between the results from only the incoming links from Pubmed and the results obtained using all the available links. Unsurprisingly, the h-index for animals is substantially greater than that of its sub-hierarchy of vertebrates. Moreover, measuring an h-index using incoming links gives a substantially higher result and indicates the magnitudes we might see if we measure h-indexes using links rather than conventional citations. Lifted hierarchies noticeably raise the results."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4",
|
| 61 |
+
"parent_section_id": null,
|
| 62 |
+
"section_name": "4. Conclusions",
|
| 63 |
+
"text": "This paper demonstrates that databases can be treated similarly to authors and journals for the purpose of measuring scholarly impact.\nWe have shown a natural and efficient way to compute the h-index of a hierarchy of citable units and to apply this to computing the h-index of databases.\nPreliminary results show that, for some well-known curated databases, the h-index, when measured by citations from conventional literature, is comparable with that of journals.\nWhen measured by links from other databases, it can be substantially higher.\nThe results are preliminary because data citation is not as widely practiced as it should be.\nThroughout this paper, we have deliberately avoided any discussion of the rights and wrongs of using the h-index as a measure of the impact or quality of research output, which is a matter of continuing discussion (Yong et al., 2014 ###reference_b22###; Brito and Navarro, 2021 ###reference_b6###).\nOne empirical and theoretical observation is that the h-index is correlated with the square root of the total number of citations.\nIn fact the ratio is in the neighborhood of 0.5.\nThis holds for our results when considering citations from the literature.\nHowever, the results are very different when we measure \u2013 as for the NCBI taxonomy \u2013 an h-index for incoming links, whose total is in the billions.\nHere the ratio is an order of magnitude lower, indicating a skewed distribution that requires further investigation.\nThere are many more open problems.\nThe most obvious is what one can do without an apparent hierarchy.\nIf no classification scheme is available, some form of cluster analysis can nearly always find one.\nThe more likely problem is that there are several classification schemes (Ashburner et al., 2000 ###reference_b4###), and one may want to choose or combine them.\nAlso, a classification scheme may not be hierarchical because a node may have more than one parent.\nApart from coming up against the complexity issues described in Section 1 ###reference_###, one has the prior problem of how the citations for a given node should be distributed among its parents to achieve a sensible ranking.\nIt may be that the notion of disjunctive citations (Wu et al., 2018 ###reference_b21###) could help with this.\nFinally, unlike conventional publications, databases evolve over time, yet for the purpose of citation, we want to treat them as conventional, immutable publications.\nThis is a general problem with data citation and will also affect how we measure the impact of databases."
|
| 64 |
+
}
|
| 65 |
+
],
|
| 66 |
+
"appendix": [
|
| 67 |
+
{
|
| 68 |
+
"section_id": "Appendix 1",
|
| 69 |
+
"parent_section_id": null,
|
| 70 |
+
"section_name": "Appendix A Proofs",
|
| 71 |
+
"text": "Let be an antichain in a ranked hierarchy , and let be a rank-minimal node of rank .\nThen, there exists an -antichain such that .\nGiven , we say that if such that , i.e., all the nodes in are also in or are descendants of a node in . It is immediate to note that is a partial order and, since is a hierarchy, that if then , since the nodes in cover the ones in , they can be less than those in .\nNow let be an antichain with a rank-minimal node of rank .\nIf no node in has a child with rank , then is already an -antichain.\nOtherwise, pick a node in with at least one child with rank . Replace with all and only its children with rank to obtain a new antichain .\nWe have that , , and .\nBy repeating this process, we obtain a strictly decreasing sequence in , which must terminate in an -antichain with .\n\u220e\n###figure_5### As an example of the procedure used in the proof of Prop. A.1 ###reference_theorem1###, refer to Figure 5 ###reference_###.\nAt point , there is an -antichain . Since and present children with rank , it is possible to \u201cexpand downward\u201d the antichain, including their children. At step 2, is substituted with and . Continuing, at step 3, is substituted with its child , the only one at level . At the last step, is substituted with and . At this point there are no more children of the nodes in the -antichain with children above the threshold , and the process terminates."
|
| 72 |
+
},
|
| 73 |
+
{
|
| 74 |
+
"section_id": "Appendix 2",
|
| 75 |
+
"parent_section_id": null,
|
| 76 |
+
"section_name": "Appendix B Further details of the databases",
|
| 77 |
+
"text": ""
|
| 78 |
+
}
|
| 79 |
+
],
|
| 80 |
+
"tables": {
|
| 81 |
+
"1": {
|
| 82 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1. </span>H-index and statistics on Drugbank. \u201dFull\u201d (top row) \u2013 the calculations on the full hierarchy; \u201dLeaves/drugs\u201d (second row) \u2013 those on the flat hierarchy (only leaves).</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S3.T1.1\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T1.1.1.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_rr\" id=\"S3.T1.1.1.1.1\">hierarchy</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T1.1.1.1.2\"><span class=\"ltx_text ltx_font_bold ltx_font_italic\" id=\"S3.T1.1.1.1.2.1\">h<span class=\"ltx_text ltx_font_upright\" id=\"S3.T1.1.1.1.2.1.1\">-index</span></span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T1.1.1.1.3\">median</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T1.1.1.1.4\">max</td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T1.1.1.1.5\">nodes</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T1.1.1.1.6\">vis.</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T1.1.1.1.7\">dig.</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.2.2\">\n<td class=\"ltx_td ltx_align_left ltx_border_rr ltx_border_t\" id=\"S3.T1.1.2.2.1\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T1.1.2.2.1.1\" style=\"background-color:#96FA99;\">Full</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T1.1.2.2.2\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T1.1.2.2.2.1\" style=\"background-color:#96FA99;\">69</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T1.1.2.2.3\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T1.1.2.2.3.1\" style=\"background-color:#96FA99;\">100</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T1.1.2.2.4\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T1.1.2.2.4.1\" style=\"background-color:#96FA99;\">260</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr ltx_border_t\" id=\"S3.T1.1.2.2.5\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T1.1.2.2.5.1\" style=\"background-color:#96FA99;\">11,803</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T1.1.2.2.6\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T1.1.2.2.6.1\" style=\"background-color:#96FA99;\">3,743</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T1.1.2.2.7\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T1.1.2.2.7.1\" style=\"background-color:#96FA99;\">417</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.3.3\">\n<td class=\"ltx_td ltx_align_left ltx_border_rr\" id=\"S3.T1.1.3.3.1\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T1.1.3.3.1.1\" style=\"background-color:#E0FFFF;\">Leaves/drugs</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T1.1.3.3.2\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T1.1.3.3.2.1\" style=\"background-color:#E0FFFF;\">33</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T1.1.3.3.3\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T1.1.3.3.3.1\" style=\"background-color:#E0FFFF;\">44</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" 
id=\"S3.T1.1.3.3.4\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T1.1.3.3.4.1\" style=\"background-color:#E0FFFF;\">76</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T1.1.3.3.5\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T1.1.3.3.5.1\" style=\"background-color:#E0FFFF;\">10,303</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T1.1.3.3.6\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T1.1.3.3.6.1\" style=\"background-color:#E0FFFF;\">\u2013</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T1.1.3.3.7\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T1.1.3.3.7.1\" style=\"background-color:#E0FFFF;\">\u2013</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.4.4\">\n<td class=\"ltx_td ltx_align_left ltx_border_rr ltx_border_t\" id=\"S3.T1.1.4.4.1\">Subclass</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T1.1.4.4.2\">63</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T1.1.4.4.3\">105</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T1.1.4.4.4\">1,369</td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr ltx_border_t\" id=\"S3.T1.1.4.4.5\">837</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T1.1.4.4.6\">541</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T1.1.4.4.7\">276</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.1.5.5\">\n<td class=\"ltx_td ltx_align_left ltx_border_rr\" id=\"S3.T1.1.5.5.1\">Class</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T1.1.5.5.2\">49</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T1.1.5.5.3\">120</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T1.1.5.5.4\">2,327</td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T1.1.5.5.5\">328</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T1.1.5.5.6\">287</td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T1.1.5.5.7\">154</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 83 |
+
"capture": "Table 1. H-index and statistics on Drugbank. \u201dFull\u201d (top row) \u2013 the calculations on the full hierarchy; \u201dLeaves/drugs\u201d (second row) \u2013 those on the flat hierarchy (only leaves)."
|
| 84 |
+
},
|
| 85 |
+
"2": {
|
| 86 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 2. </span><span class=\"ltx_text ltx_font_italic\" id=\"S3.T2.2.1\">h</span>-index\u00a0calculations for GtoPdb. \u201dFull\u201d (top rows) are the calculations on the full hierarchy; \u201dNo\u201d (second rows) are those on the flat hierarchy (only leaves); and \u201dLifted\u201d (bottom rows) are those on the lifted hierarchy. </figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S3.T2.3\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T2.3.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_r\" id=\"S3.T2.3.1.1.1\">Cit. type</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_rr\" id=\"S3.T2.3.1.1.2\">Hierarchy</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_r\" id=\"S3.T2.3.1.1.3\"><span class=\"ltx_text ltx_font_bold ltx_font_italic\" id=\"S3.T2.3.1.1.3.1\">h<span class=\"ltx_text ltx_font_upright\" id=\"S3.T2.3.1.1.3.1.1\">-index</span></span></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_r\" id=\"S3.T2.3.1.1.4\">median</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_r\" id=\"S3.T2.3.1.1.5\">max</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_rr\" id=\"S3.T2.3.1.1.6\">nodes</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column\" id=\"S3.T2.3.1.1.7\">vis.</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column\" id=\"S3.T2.3.1.1.8\">dig.</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T2.3.2.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T2.3.2.1.1\" rowspan=\"4\"><span class=\"ltx_text\" id=\"S3.T2.3.2.1.1.1\">Citations</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_rr ltx_border_t\" id=\"S3.T2.3.2.1.2\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.2.1.2.1\" style=\"background-color:#96FA99;\">Full</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T2.3.2.1.3\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.2.1.3.1\" style=\"background-color:#96FA99;\">55</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T2.3.2.1.4\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.2.1.4.1\" style=\"background-color:#96FA99;\">77</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T2.3.2.1.5\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.2.1.5.1\" style=\"background-color:#96FA99;\">276</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr ltx_border_t\" id=\"S3.T2.3.2.1.6\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.2.1.6.1\" style=\"background-color:#96FA99;\">4,079</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.3.2.1.7\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.2.1.7.1\" style=\"background-color:#96FA99;\">1,185</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.3.2.1.8\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.2.1.8.1\" style=\"background-color:#96FA99;\">275</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.3.3.2\">\n<td class=\"ltx_td ltx_align_left ltx_border_rr\" id=\"S3.T2.3.3.2.1\" 
style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.3.2.1.1\" style=\"background-color:#E0FFFF;\">No</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T2.3.3.2.2\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.3.2.2.1\" style=\"background-color:#E0FFFF;\">32</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T2.3.3.2.3\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.3.2.3.1\" style=\"background-color:#E0FFFF;\">44</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T2.3.3.2.4\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.3.2.4.1\" style=\"background-color:#E0FFFF;\">94</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T2.3.3.2.5\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.3.2.5.1\" style=\"background-color:#E0FFFF;\">4,079</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.3.3.2.6\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.3.2.6.1\" style=\"background-color:#E0FFFF;\">\u2013</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.3.3.2.7\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.3.2.7.1\" style=\"background-color:#E0FFFF;\">\u2013</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.3.4.3\">\n<td class=\"ltx_td ltx_align_left ltx_border_rr ltx_border_t\" id=\"S3.T2.3.4.3.1\">Families</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T2.3.4.3.2\">54</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T2.3.4.3.3\">86</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T2.3.4.3.4\">276</td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr ltx_border_t\" id=\"S3.T2.3.4.3.5\">807</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.3.4.3.6\">572</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.3.4.3.7\">253</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.3.5.4\">\n<td class=\"ltx_td ltx_align_left ltx_border_rr\" id=\"S3.T2.3.5.4.1\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.5.4.1.1\" style=\"background-color:#FFF0F5;\">Lifted</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T2.3.5.4.2\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.5.4.2.1\" style=\"background-color:#FFF0F5;\">55</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T2.3.5.4.3\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.5.4.3.1\" style=\"background-color:#FFF0F5;\">77</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T2.3.5.4.4\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.5.4.4.1\" style=\"background-color:#FFF0F5;\">276</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T2.3.5.4.5\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.5.4.5.1\" style=\"background-color:#FFF0F5;\">4,872</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.3.5.4.6\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.5.4.6.1\" style=\"background-color:#FFF0F5;\">1,185</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.3.5.4.7\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.5.4.7.1\" style=\"background-color:#FFF0F5;\">275</span></td>\n</tr>\n<tr 
class=\"ltx_tr\" id=\"S3.T2.3.6.5\">\n<td class=\"ltx_td ltx_align_left ltx_border_r ltx_border_tt\" id=\"S3.T2.3.6.5.1\" rowspan=\"4\"><span class=\"ltx_text\" id=\"S3.T2.3.6.5.1.1\">Links</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_rr ltx_border_tt\" id=\"S3.T2.3.6.5.2\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.6.5.2.1\" style=\"background-color:#96FA99;\">Full</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_tt\" id=\"S3.T2.3.6.5.3\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.6.5.3.1\" style=\"background-color:#96FA99;\">167</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_tt\" id=\"S3.T2.3.6.5.4\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.6.5.4.1\" style=\"background-color:#96FA99;\">251</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_tt\" id=\"S3.T2.3.6.5.5\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.6.5.5.1\" style=\"background-color:#96FA99;\">880</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr ltx_border_tt\" id=\"S3.T2.3.6.5.6\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.6.5.6.1\" style=\"background-color:#96FA99;\">4,079</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_tt\" id=\"S3.T2.3.6.5.7\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.6.5.7.1\" style=\"background-color:#96FA99;\">1,584</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_tt\" id=\"S3.T2.3.6.5.8\" style=\"background-color:#96FA99;\"><span class=\"ltx_text\" id=\"S3.T2.3.6.5.8.1\" style=\"background-color:#96FA99;\">419</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.3.7.6\">\n<td class=\"ltx_td ltx_align_left ltx_border_rr\" id=\"S3.T2.3.7.6.1\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.7.6.1.1\" style=\"background-color:#E0FFFF;\">No</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T2.3.7.6.2\" style=\"background-color:#E0FFFF;\">172</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T2.3.7.6.3\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.7.6.3.1\" style=\"background-color:#E0FFFF;\">253</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T2.3.7.6.4\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.7.6.4.1\" style=\"background-color:#E0FFFF;\">909</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T2.3.7.6.5\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.7.6.5.1\" style=\"background-color:#E0FFFF;\">4,079</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.3.7.6.6\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.7.6.6.1\" style=\"background-color:#E0FFFF;\">\u2013</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.3.7.6.7\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T2.3.7.6.7.1\" style=\"background-color:#E0FFFF;\">\u2013</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.3.8.7\">\n<td class=\"ltx_td ltx_align_left ltx_border_rr ltx_border_t\" id=\"S3.T2.3.8.7.1\">Families</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T2.3.8.7.2\">135</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T2.3.8.7.3\">298</td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" 
id=\"S3.T2.3.8.7.4\">3,013</td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr ltx_border_t\" id=\"S3.T2.3.8.7.5\">807</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.3.8.7.6\">588</td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T2.3.8.7.7\">211</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T2.3.9.8\">\n<td class=\"ltx_td ltx_align_left ltx_border_rr\" id=\"S3.T2.3.9.8.1\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.9.8.1.1\" style=\"background-color:#FFF0F5;\">Lifted</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T2.3.9.8.2\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.9.8.2.1\" style=\"background-color:#FFF0F5;\">179</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T2.3.9.8.3\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.9.8.3.1\" style=\"background-color:#FFF0F5;\">265</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T2.3.9.8.4\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.9.8.4.1\" style=\"background-color:#FFF0F5;\">909</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T2.3.9.8.5\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.9.8.5.1\" style=\"background-color:#FFF0F5;\">4,872</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.3.9.8.6\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.9.8.6.1\" style=\"background-color:#FFF0F5;\">1,604</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T2.3.9.8.7\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T2.3.9.8.7.1\" style=\"background-color:#FFF0F5;\">468</span></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 87 |
+
"capture": "Table 2. h-index\u00a0calculations for GtoPdb. \u201dFull\u201d (top rows) are the calculations on the full hierarchy; \u201dNo\u201d (second rows) are those on the flat hierarchy (only leaves); and \u201dLifted\u201d (bottom rows) are those on the lifted hierarchy. "
|
| 88 |
+
},
|
| 89 |
+
"3": {
|
| 90 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T3\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 3. </span><span class=\"ltx_text ltx_font_italic\" id=\"S3.T3.2.1\">h</span>-index\u00a0calculations for NCBI Taxonomy. \u201dGiven\u201d (first rows) refers to the NCBI taxonomy without modifications; and \u201dLifted\u201d (bottom rows) are those on the lifted hierarchy. \u201dPubmed\u201d (top rows) are the calculations counting only the citations from Pubmed; and, \u201dAll\u201d are the calculations counting Pubmed and incoming web links as citations.</figcaption>\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S3.T3.3\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T3.3.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_rr\" id=\"S3.T3.3.1.1.1\">Hierarchy</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_r\" id=\"S3.T3.3.1.1.2\">Refs from</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_r\" id=\"S3.T3.3.1.1.3\">Subtree</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_r\" id=\"S3.T3.3.1.1.4\"><span class=\"ltx_text ltx_font_bold ltx_font_italic\" id=\"S3.T3.3.1.1.4.1\">h<span class=\"ltx_text ltx_font_upright\" id=\"S3.T3.3.1.1.4.1.1\">-index</span></span></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_rr\" id=\"S3.T3.3.1.1.5\">Nodes</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column\" id=\"S3.T3.3.1.1.6\">vis</th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column\" id=\"S3.T3.3.1.1.7\">dig</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T3.3.2.1\">\n<td class=\"ltx_td ltx_border_rr ltx_border_t\" id=\"S3.T3.3.2.1.1\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.3.2.1.2\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.2.1.2.1\" style=\"background-color:#E0FFFF;\">Pubmed</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.3.2.1.3\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.2.1.3.1\" style=\"background-color:#E0FFFF;\">animals</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T3.3.2.1.4\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.2.1.4.1\" style=\"background-color:#E0FFFF;\">1,181</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr ltx_border_t\" id=\"S3.T3.3.2.1.5\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.2.1.5.1\" style=\"background-color:#E0FFFF;\">1,147,717</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T3.3.2.1.6\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.2.1.6.1\" style=\"background-color:#E0FFFF;\">47,880</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T3.3.2.1.7\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.2.1.7.1\" style=\"background-color:#E0FFFF;\">5,724</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.3.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S3.T3.3.3.2.1\">Given</td>\n<td class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.3.3.2.2\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.3.2.2.1\" style=\"background-color:#E0FFFF;\">Pubmed</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.3.3.2.3\" 
style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.3.2.3.1\" style=\"background-color:#E0FFFF;\">vertebrates</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T3.3.3.2.4\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.3.2.4.1\" style=\"background-color:#E0FFFF;\">681</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T3.3.3.2.5\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.3.2.5.1\" style=\"background-color:#E0FFFF;\">112,271</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.3.3.2.6\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.3.2.6.1\" style=\"background-color:#E0FFFF;\">17,503</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.3.3.2.7\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.3.2.7.1\" style=\"background-color:#E0FFFF;\">3,133</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.4.3\">\n<td class=\"ltx_td ltx_border_rr\" id=\"S3.T3.3.4.3.1\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.3.4.3.2\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.4.3.2.1\" style=\"background-color:#FFF0F5;\">All</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.3.4.3.3\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.4.3.3.1\" style=\"background-color:#FFF0F5;\">animals</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T3.3.4.3.4\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.4.3.4.1\" style=\"background-color:#FFF0F5;\">3,794</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T3.3.4.3.5\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.4.3.5.1\" style=\"background-color:#FFF0F5;\">1,147,717</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.3.4.3.6\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.4.3.6.1\" style=\"background-color:#FFF0F5;\">489,692</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.3.4.3.7\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.4.3.7.1\" style=\"background-color:#FFF0F5;\">16,712</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.5.4\">\n<td class=\"ltx_td ltx_border_rr\" id=\"S3.T3.3.5.4.1\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.3.5.4.2\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.5.4.2.1\" style=\"background-color:#FFF0F5;\">All</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.3.5.4.3\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.5.4.3.1\" style=\"background-color:#FFF0F5;\">vertebrates</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T3.3.5.4.4\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.5.4.4.1\" style=\"background-color:#FFF0F5;\">2,065</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T3.3.5.4.5\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.5.4.5.1\" style=\"background-color:#FFF0F5;\">112,271</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.3.5.4.6\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.5.4.6.1\" style=\"background-color:#FFF0F5;\">43,282</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.3.5.4.7\" 
style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.5.4.7.1\" style=\"background-color:#FFF0F5;\">9,254</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.6.5\">\n<td class=\"ltx_td ltx_border_rr ltx_border_t\" id=\"S3.T3.3.6.5.1\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.3.6.5.2\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.6.5.2.1\" style=\"background-color:#E0FFFF;\">Pubmed</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.3.6.5.3\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.6.5.3.1\" style=\"background-color:#E0FFFF;\">animals</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r ltx_border_t\" id=\"S3.T3.3.6.5.4\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.6.5.4.1\" style=\"background-color:#E0FFFF;\">1,301</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr ltx_border_t\" id=\"S3.T3.3.6.5.5\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.6.5.5.1\" style=\"background-color:#E0FFFF;\">1,272,867</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T3.3.6.5.6\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.6.5.6.1\" style=\"background-color:#E0FFFF;\">47,665</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S3.T3.3.6.5.7\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.6.5.7.1\" style=\"background-color:#E0FFFF;\">6,560</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.7.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S3.T3.3.7.6.1\">Lifted</td>\n<td class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.3.7.6.2\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.7.6.2.1\" style=\"background-color:#E0FFFF;\">Pubmed</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.3.7.6.3\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.7.6.3.1\" style=\"background-color:#E0FFFF;\">vertebrates</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T3.3.7.6.4\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.7.6.4.1\" style=\"background-color:#E0FFFF;\">758</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T3.3.7.6.5\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.7.6.5.1\" style=\"background-color:#E0FFFF;\">131,464</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.3.7.6.6\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.7.6.6.1\" style=\"background-color:#E0FFFF;\">17,700</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.3.7.6.7\" style=\"background-color:#E0FFFF;\"><span class=\"ltx_text\" id=\"S3.T3.3.7.6.7.1\" style=\"background-color:#E0FFFF;\">3,638</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.8.7\">\n<td class=\"ltx_td ltx_border_rr\" id=\"S3.T3.3.8.7.1\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.3.8.7.2\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.8.7.2.1\" style=\"background-color:#FFF0F5;\">All</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.3.8.7.3\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.8.7.3.1\" style=\"background-color:#FFF0F5;\">animals</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" 
id=\"S3.T3.3.8.7.4\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.8.7.4.1\" style=\"background-color:#FFF0F5;\">3,887</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T3.3.8.7.5\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.8.7.5.1\" style=\"background-color:#FFF0F5;\">1,272,867</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.3.8.7.6\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.8.7.6.1\" style=\"background-color:#FFF0F5;\">494,769</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.3.8.7.7\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.8.7.7.1\" style=\"background-color:#FFF0F5;\">18,284</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T3.3.9.8\">\n<td class=\"ltx_td ltx_border_rr\" id=\"S3.T3.3.9.8.1\"></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.3.9.8.2\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.9.8.2.1\" style=\"background-color:#FFF0F5;\">All</span></td>\n<td class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.3.9.8.3\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.9.8.3.1\" style=\"background-color:#FFF0F5;\">vertebrates</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_r\" id=\"S3.T3.3.9.8.4\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.9.8.4.1\" style=\"background-color:#FFF0F5;\">2,148</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_rr\" id=\"S3.T3.3.9.8.5\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.9.8.5.1\" style=\"background-color:#FFF0F5;\">131,464</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.3.9.8.6\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.9.8.6.1\" style=\"background-color:#FFF0F5;\">45,996</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S3.T3.3.9.8.7\" style=\"background-color:#FFF0F5;\"><span class=\"ltx_text\" id=\"S3.T3.3.9.8.7.1\" style=\"background-color:#FFF0F5;\">10,371</span></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 91 |
+
"capture": "Table 3. h-index\u00a0calculations for NCBI Taxonomy. \u201dGiven\u201d (first rows) refers to the NCBI taxonomy without modifications; and \u201dLifted\u201d (bottom rows) are those on the lifted hierarchy. \u201dPubmed\u201d (top rows) are the calculations counting only the citations from Pubmed; and, \u201dAll\u201d are the calculations counting Pubmed and incoming web links as citations."
|
| 92 |
+
}
|
| 93 |
+
},
|
| 94 |
+
"image_paths": {
|
| 95 |
+
"1": {
|
| 96 |
+
"figure_path": "2408.09842v1_figure_1.png",
|
| 97 |
+
"caption": "Figure 1. Partial representation of the Drugbank hierarchical structure. The web pages associated with the nodes are shown on the left. In Drugbank only the leaves of the hierarchy are directly cited; e.g., we can see that the citations of \u201dLactones\u201d is the aggregation of the citations to the drugs (we report only Lovastatin and Erythromycin) belonging to the class.",
|
| 98 |
+
"url": "http://arxiv.org/html/2408.09842v1/x1.png"
|
| 99 |
+
},
|
| 100 |
+
"2": {
|
| 101 |
+
"figure_path": "2408.09842v1_figure_2.png",
|
| 102 |
+
"caption": "Figure 2. A hierarchical citation structure in which the level of each node is determined by its rank \u2013 the number of citations to it. Columns: R \u2013 the level; N \u2013 the length of the maximal l\ud835\udc59litalic_l-antichain at that level; and H \u2013 the h-index for that antichain.",
|
| 103 |
+
"url": "http://arxiv.org/html/2408.09842v1/x2.png"
|
| 104 |
+
},
|
| 105 |
+
"3": {
|
| 106 |
+
"figure_path": "2408.09842v1_figure_3.png",
|
| 107 |
+
"caption": "Figure 3. Part of the IUPHAR/BPS hierarchical structure. The root node represents the whole databases, and the family structure is not stratified as families may have subfamilies. All the nodes in the hierarchy can be independently cited; e.g., we show some sample citation numbers for the GPCR branch of the tree, where the internal nodes can receive direct citations (#cit) that could be also aggregated (#cit agg) with the citations of the child nodes.",
|
| 108 |
+
"url": "http://arxiv.org/html/2408.09842v1/x3.png"
|
| 109 |
+
},
|
| 110 |
+
"4": {
|
| 111 |
+
"figure_path": "2408.09842v1_figure_4.png",
|
| 112 |
+
"caption": "Figure 4. Example of one hierarchy (a) and its corresponding \u201clifted\u201d version (b). Once again, R is the value of the rank at each level, N is the cardinality of the maximum antichain at each level, H is the h-index of that antichain. Below each hierarchy, its h-index is highlighted.",
|
| 113 |
+
"url": "http://arxiv.org/html/2408.09842v1/x4.png"
|
| 114 |
+
},
|
| 115 |
+
"5": {
|
| 116 |
+
"figure_path": "2408.09842v1_figure_5.png",
|
| 117 |
+
"caption": "Figure 5. Example of the procedure described in Lemma 2.5 applied on one example hierarchy.",
|
| 118 |
+
"url": "http://arxiv.org/html/2408.09842v1/x5.png"
|
| 119 |
+
}
|
| 120 |
+
},
|
| 121 |
+
"validation": true,
|
| 122 |
+
"references": [
|
| 123 |
+
{
|
| 124 |
+
"1": {
|
| 125 |
+
"title": "The Concise Guide to PHARMACOLOGY 2019/20: G\nprotein-coupled receptors.",
|
| 126 |
+
"author": "Stephen PH Alexander,\nArthur Christopoulos, Anthony P\nDavenport, Eamonn Kelly, Alistair\nMathie, John A Peters, Emma L Veale,\nJane F Armstrong, Elena Faccenda,\nSimon D Harding, et al.\n2019a.",
|
| 127 |
+
"venue": "British journal of pharmacology\n176 (2019), S21\u2013S141.",
|
| 128 |
+
"url": null
|
| 129 |
+
}
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"2": {
|
| 133 |
+
"title": "The Concise Guide to PHARMACOLOGY 2019/20:\nIntroduction and other protein targets.",
|
| 134 |
+
"author": "Stephen PH Alexander,\nEamonn Kelly, Alistair Mathie,\nJohn A Peters, Emma L Veale,\nJane F Armstrong, Elena Faccenda,\nSimon D Harding, Adam J Pawson,\nJoanna L Sharman, et al.\n2019b.",
|
| 135 |
+
"venue": "British journal of pharmacology\n176 (2019), S1\u2013S20.",
|
| 136 |
+
"url": null
|
| 137 |
+
}
|
| 138 |
+
},
|
| 139 |
+
{
|
| 140 |
+
"3": {
|
| 141 |
+
"title": "Gene ontology: tool for the unification of\nbiology.",
|
| 142 |
+
"author": "Michael Ashburner,\nCatherine A Ball, Judith A Blake,\nDavid Botstein, Heather Butler,\nJ Michael Cherry, Allan P Davis,\nKara Dolinski, Selina S Dwight,\nJanan T Eppig, et al.\n2000.",
|
| 143 |
+
"venue": "Nature genetics 25,\n1 (2000), 25\u201329.",
|
| 144 |
+
"url": null
|
| 145 |
+
}
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"4": {
|
| 149 |
+
"title": "What do we know about the h index?",
|
| 150 |
+
"author": "L. Bornmann and H.-D.\nDaniel. 2007.",
|
| 151 |
+
"venue": "Journal of the American Society for\nInformation Science and Technology 58,\n9 (2007), 1381\u20131385.",
|
| 152 |
+
"url": null
|
| 153 |
+
}
|
| 154 |
+
},
|
| 155 |
+
{
|
| 156 |
+
"5": {
|
| 157 |
+
"title": "The inconsistency of h-index: A mathematical\nanalysis.",
|
| 158 |
+
"author": "Ricardo Brito and\nAlonso Rodr\u00edguez Navarro.\n2021.",
|
| 159 |
+
"venue": "Journal of Informetrics\n15, 1 (2021),\n101106.",
|
| 160 |
+
"url": null
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"6": {
|
| 165 |
+
"title": "Why data citation isn\u2019t working, and what to do\nabout it.",
|
| 166 |
+
"author": "Peter Buneman, Greig\nChristie, Jamie A. Davies, Roza\nDimitrellou, Simon D. Harding, Adam J.\nPawson, Joanna L. Sharman, and Yinjun\nWu. 2020.",
|
| 167 |
+
"venue": "Database J. Biol. Databases Curation\n2020 (2020).",
|
| 168 |
+
"url": null
|
| 169 |
+
}
|
| 170 |
+
},
|
| 171 |
+
{
|
| 172 |
+
"7": {
|
| 173 |
+
"title": "A Rule-Based Citation System for Structured and\nEvolving Datasets.",
|
| 174 |
+
"author": "P. Buneman and G.\nSilvello. 2010.",
|
| 175 |
+
"venue": "IEEE Data Eng. Bull. 33,\n3 (2010), 33\u201341.",
|
| 176 |
+
"url": null
|
| 177 |
+
}
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"8": {
|
| 181 |
+
"title": "Data Journals: A Survey.",
|
| 182 |
+
"author": "L. Candela, D. Castelli,\nP. Manghi, and A. Tani.\n2015.",
|
| 183 |
+
"venue": "Journal of the Association for Information\nScience and Technology 66, 9\n(2015), 1747\u20131762.",
|
| 184 |
+
"url": null
|
| 185 |
+
}
|
| 186 |
+
},
|
| 187 |
+
{
|
| 188 |
+
"9": {
|
| 189 |
+
"title": "The H-index can be easily manipulated.",
|
| 190 |
+
"author": "Bart de Keijzer and\nKrzysztof R. Apt. 2013.",
|
| 191 |
+
"venue": "",
|
| 192 |
+
"url": null
|
| 193 |
+
}
|
| 194 |
+
},
|
| 195 |
+
{
|
| 196 |
+
"10": {
|
| 197 |
+
"title": "The IUPHAR/BPS Guide to PHARMACOLOGY in 2018:\nupdates and expansion to encompass the new guide to IMMUNOPHARMACOLOGY.",
|
| 198 |
+
"author": "Simon D Harding, Joanna L\nSharman, Elena Faccenda, Chris Southan,\nAdam J Pawson, Sam Ireland,\nAlasdair JG Gray, Liam Bruce,\nStephen PH Alexander, Stephen Anderton,\net al. 2018.",
|
| 199 |
+
"venue": "Nucleic acids research\n46, D1 (2018),\nD1091\u2013D1106.",
|
| 200 |
+
"url": null
|
| 201 |
+
}
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"11": {
|
| 205 |
+
"title": "An index to quantify an individual\u2019s scientific\nresearch output.",
|
| 206 |
+
"author": "J. E. Hirsch.\n2005.",
|
| 207 |
+
"venue": "Proceedings of the National academy of\nSciences 102, 46\n(2005), 16569\u201316572.",
|
| 208 |
+
"url": null
|
| 209 |
+
}
|
| 210 |
+
},
|
| 211 |
+
{
|
| 212 |
+
"12": {
|
| 213 |
+
"title": "25 Years of Molecular Biology Databases: A Study\nof Proliferation, Impact, and Maintenance.",
|
| 214 |
+
"author": "H. J. Imker.\n2018.",
|
| 215 |
+
"venue": "Frontiers in Research Metrics and\nAnalytics 3 (2018).",
|
| 216 |
+
"url": null
|
| 217 |
+
}
|
| 218 |
+
},
|
| 219 |
+
{
|
| 220 |
+
"13": {
|
| 221 |
+
"title": "The NCBI handbook.",
|
| 222 |
+
"author": "Jo McEntyre and Jim\nOstell. 2002.",
|
| 223 |
+
"venue": "Bethesda (MD): National Center for\nBiotechnology Information (US) (2002).",
|
| 224 |
+
"url": null
|
| 225 |
+
}
|
| 226 |
+
},
|
| 227 |
+
{
|
| 228 |
+
"14": {
|
| 229 |
+
"title": "Manipulating citation indices in a social context.",
|
| 230 |
+
"author": "Chrystalla Pavlou and\nEdith Elkind. 2016.",
|
| 231 |
+
"venue": "(2016).",
|
| 232 |
+
"url": null
|
| 233 |
+
}
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"15": {
|
| 237 |
+
"title": "Database resources of the national center for\nbiotechnology information.",
|
| 238 |
+
"author": "Eric W Sayers, Richa\nAgarwala, Evan E Bolton, J Rodney\nBrister, Kathi Canese, Karen Clark,\nRyan Connor, Nicolas Fiorini,\nKathryn Funk, Timothy Hefferon,\net al. 2019.",
|
| 239 |
+
"venue": "Nucleic acids research\n47, Database issue\n(2019), D23.",
|
| 240 |
+
"url": null
|
| 241 |
+
}
|
| 242 |
+
},
|
| 243 |
+
{
|
| 244 |
+
"16": {
|
| 245 |
+
"title": "NCBI Taxonomy: a comprehensive update on curation,\nresources and tools.",
|
| 246 |
+
"author": "Conrad L Schoch, Stacy\nCiufo, Mikhail Domrachev, Carol L\nHotton, Sivakumar Kannan, Rogneda\nKhovanskaya, Detlef Leipe, Richard\nMcveigh, Kathleen O\u2019Neill, Barbara\nRobbertse, et al. 2020.",
|
| 247 |
+
"venue": "Database 2020\n(2020).",
|
| 248 |
+
"url": null
|
| 249 |
+
}
|
| 250 |
+
},
|
| 251 |
+
{
|
| 252 |
+
"17": {
|
| 253 |
+
"title": "Multiple versions of the h-index: cautionary use\nfor formal academic purposes.",
|
| 254 |
+
"author": "J. A. Teixeira da Silva and\nJ. Dobr\u00e1nszki. 2018.",
|
| 255 |
+
"venue": "Scientometrics 115,\n2 (2018), 1107\u20131113.",
|
| 256 |
+
"url": null
|
| 257 |
+
}
|
| 258 |
+
},
|
| 259 |
+
{
|
| 260 |
+
"18": {
|
| 261 |
+
"title": "H-index manipulation by merging articles: Models,\ntheory, and experiments.",
|
| 262 |
+
"author": "Ren\u00e9 Van Bevern,\nChristian Komusiewicz, Rolf Niedermeier,\nManuel Sorge, and Toby Walsh.\n2016.",
|
| 263 |
+
"venue": "Artificial Intelligence\n240 (2016), 19\u201335.",
|
| 264 |
+
"url": null
|
| 265 |
+
}
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"19": {
|
| 269 |
+
"title": "DrugBank 5.0: a major update to the DrugBank\ndatabase for 2018.",
|
| 270 |
+
"author": "D. S. Wishart, Y. D.\nFeunang, A. C. Guo, E. J. Lo,\nA. Marcu, J. R. Grant,\nT. Sajed, D. Johnson, C.\nLi, Z. Sayeeda, N. Assempour,\nI. Iynkkaran, Y. Liu, A.\nMaciejewski, N. Gale, A. Wilson,\nL. Chin, R. Cummings, D.\nLe, A. Pon, C. Knox, and\nM. Wilson. 2018.",
|
| 271 |
+
"venue": "Nucleic Acids Research\n46, Database-Issue\n(2018), D1074\u2013D1082.",
|
| 272 |
+
"url": null
|
| 273 |
+
}
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"20": {
|
| 277 |
+
"title": "Data citation: giving credit where credit is due.\nIn Proceedings of the 2018 international conference\non management of data. 99\u2013114.",
|
| 278 |
+
"author": "Yinjun Wu, Abdussalam\nAlawini, Susan B Davidson, and\nGianmaria Silvello. 2018.",
|
| 279 |
+
"venue": "",
|
| 280 |
+
"url": null
|
| 281 |
+
}
|
| 282 |
+
},
|
| 283 |
+
{
|
| 284 |
+
"21": {
|
| 285 |
+
"title": "Critique of Hirsch\u2019s citation index: A\ncombinatorial Fermi problem.",
|
| 286 |
+
"author": "Alexander Yong et al.\n2014.",
|
| 287 |
+
"venue": "Notices of the AMS 61,\n9 (2014), 1040\u20131050.",
|
| 288 |
+
"url": null
|
| 289 |
+
}
|
| 290 |
+
}
|
| 291 |
+
],
|
| 292 |
+
"url": "http://arxiv.org/html/2408.09842v1"
|
| 293 |
+
}
|
20240819/2408.09912v1.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
20240819/2408.09936v1.json
ADDED
|
@@ -0,0 +1,22 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Electron-nucleus cross sections from transfer learning",
|
| 3 |
+
"abstract": "Transfer learning (TL) allows a deep neural network (DNN) trained on one type of data to be adapted for new problems with limited information. We propose to use the TL technique in physics. The DNN learns the physics of one process, and after fine-tuning, it makes predictions for related processes. We consider the DNNs, trained on inclusive electron-carbon scattering data, and show that after fine-tuning, they accurately predict cross sections for electron interactions with nuclear targets ranging from lithium to iron. The method works even when the DNN is fine-tuned on a small dataset.",
|
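The fine-tuning setup described in the abstract (and in the figure captions below) can be illustrated with a short PyTorch sketch; the network size, the three input features, and the random data are our placeholders, not the paper's actual architecture or dataset.

```python
import torch
import torch.nn as nn

# Stand-in for a DNN pretrained on electron-carbon cross-section data;
# layer sizes and the 3 input features are illustrative placeholders.
net = nn.Sequential(nn.Linear(3, 64), nn.ReLU(),
                    nn.Linear(64, 64), nn.ReLU(),
                    nn.Linear(64, 1))

# "Two last layers fine-tuned" variant: freeze everything else.
for p in net.parameters():
    p.requires_grad = False
for p in net[-3:].parameters():     # last hidden layer + output layer
    p.requires_grad = True

opt = torch.optim.Adam((p for p in net.parameters() if p.requires_grad),
                       lr=1e-3)
x, y = torch.randn(32, 3), torch.randn(32, 1)   # placeholder target-nucleus data
opt.zero_grad()
loss = nn.functional.mse_loss(net(x), y)
loss.backward()
opt.step()
```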
| 4 |
+
"sections": [],
|
| 5 |
+
"appendix": [],
|
| 6 |
+
"tables": {},
|
| 7 |
+
"image_paths": {
|
| 8 |
+
"1": {
|
| 9 |
+
"figure_path": "2408.09936v1_figure_1.png",
|
| 10 |
+
"caption": "Figure 1: Double-differential cross section d2\u2062\u03c3/d\u2062\u03c9\u2062d\u2062\u03a9superscript\ud835\udc512\ud835\udf0e\ud835\udc51\ud835\udf14\ud835\udc51\u03a9{d^{2}\\sigma}/{d\\omega d\\Omega}italic_d start_POSTSUPERSCRIPT 2 end_POSTSUPERSCRIPT italic_\u03c3 / italic_d italic_\u03c9 italic_d roman_\u03a9 for inclusive electron scattering on lithium [15], oxygen [11], aluminum [16], calcium [17], and iron [18] for selected kinematics. The fits obtained for the proportion of training to test datasets 7:3. The red line denotes the DNNs predictions, and the green area denotes 1\u2062\u03c31\ud835\udf0e1\\sigma1 italic_\u03c3 uncertainty. The results for DNN with all/two last layers fine-tuned are shown in the top/bottom row. The electron-carbon DNN predictions multiplied by factor A/12 are shown by blue-dashed lines corresponding to 1\u2062\u03c31\ud835\udf0e1\\sigma1 italic_\u03c3 uncertainty denoted by a light blue area. The red (green) points represent the training (test) dataset.",
|
| 11 |
+
"url": "http://arxiv.org/html/2408.09936v1/x1.png"
|
| 12 |
+
},
|
| 13 |
+
"2": {
|
| 14 |
+
"figure_path": "2408.09936v1_figure_2.png",
|
| 15 |
+
"caption": "Figure 2: Caption same as in Fig. 1 but results obtained for the proportion of training to test datasets 1:9.",
|
| 16 |
+
"url": "http://arxiv.org/html/2408.09936v1/x2.png"
|
| 17 |
+
}
|
| 18 |
+
},
|
| 19 |
+
"validation": true,
|
| 20 |
+
"references": [],
|
| 21 |
+
"url": "http://arxiv.org/html/2408.09936v1"
|
| 22 |
+
}
|
20240819/2408.09954v1.json
ADDED
|
@@ -0,0 +1,226 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Experiment-based Models for Air Time and Current Consumption of LoRaWAN LR-FHSS",
|
| 3 |
+
"abstract": "Long Range - Frequency Hopping Spread Spectrum (LR-FHSS) is an emerging and promising technology recently introduced into the LoRaWAN protocol specification for both terrestrial and non-terrestrial networks, notably satellites.\nThe higher capacity, long-range and robustness to Doppler effect make LR-FHSS a primary candidate for direct-to-satellite (DtS) connectivity for enabling Internet-of-things (IoT) in remote areas. The LR-FHSS devices envisioned for DtS IoT will be primarily battery-powered. Therefore, it is crucial to investigate the current consumption characteristics and Time-on-Air (ToA) of LR-FHSS technology. However, to our knowledge, no prior research has presented the accurate ToA and current consumption models for this newly introduced scheme. This paper addresses this shortcoming through extensive field measurements and the development of analytical models. Specifically, we have measured the current consumption and ToA for variable transmit power, message payload, and two new LR-FHSS-based Data Rates (DR8 and DR9). We also develop current consumption and ToA analytical models demonstrating a strong correlation with the measurement results exhibiting a relative error of less than 0.3%. Thus, it confirms the validity of our models. Conversely, the existing analytical models exhibit a higher relative error rate of -9.2 to 3.4% compared to our measurement results. The presented in this paper results can be further used for simulators or in analytical studies to accurately model the on-air time and energy consumption of LR-FHSS devices.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "The LoRaWAN direct-to-satellite (DtS) communication is emerging as a potential connectivity solution for IoT applications, especially for the ones deployed in remote areas [1 ###reference_b1###, 2 ###reference_b2###]. As a recent advancement to the LoRaWAN protocol specification, Long Range - Frequency Hopping Spread Spectrum (LR-FHSS) has been designed and introduced to support both terrestrial and non-terrestrial IoT networks to offer low-power connectivity. Along with the long coverage, LR-FHSS offers robustness to interference through lower code rate, intra-packet frequency hopping, and header diversity [3 ###reference_b3###]. Due to these characteristics and strong robustness to the Doppler effect, LR-FHSS stands out as one of the prominent low-power wide-area network (LPWAN) technologies to enable direct connectivity between machine devices and the Low Earth Orbit (LEO) satellites. In [4 ###reference_b4###], an initial insight into the LR-FHSS network performance and theoretical capacity has been provided. Specifically, in European Telecommunications Standards Institute (ETSI) regions, a single gateway operating on 125 kHz bandwidth can receive up to 1 million daily packets while maintaining a 10% error rate. Conversely, according to the Federal Communications Commission (FCC) region with 1.5 MHz channel bandwidth, the capacity increases to 11 million daily packets. The LR-FHSS technology aligns with the connectivity needs of DtS IoT by offering a combination of high capacity, resistance to Doppler effects, and robustness against interference [5 ###reference_b5###]. The Long Range Wide Area Network (LoRaWAN) protocol, the LR-FHSS, and conventional LoRa modulations can co-exist without modifying the existing network architecture. To leverage this incorporation, Adaptive Data Rate (ADR) commands from the network server are sufficient to switch between the modulations [6 ###reference_b6###]. However, LR-FHSS only supports uplink communication, i.e., from the device to a gateway. For the successful demodulation of an uplink LR-FHSS packet, a LoRaWAN V2 gateway with digital signal processing capabilities is required. The network can employ either LoRa or Frequency-shift keying (FSK) modulations for downlink communication [7 ###reference_b7###]."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "1.1",
|
| 13 |
+
"parent_section_id": "1",
|
| 14 |
+
"section_name": "Relevant works",
|
| 15 |
+
"text": "The earlier studies discuss and examine LR-FHSS DtS performance, mainly focusing on network scalability. Specifically, study [3 ###reference_b3###] provides an overview of LR-FHSS and compares its goodput with the LoRa modulation. An analytical and simulation model was proposed to investigate the link budget and Media Access Control (MAC) performance of LR-FHSS for DtS scenarios in [6 ###reference_b6###]. The results obtained from these models confirm the feasibility of LR-FHSS DtS connectivity. The authors in [8 ###reference_b8###] develop an outage probability analytical model for the DtS IoT network, considering factors such as channel fading, noise, and capture effect. The results reported in [3 ###reference_b3###, 6 ###reference_b6###, 8 ###reference_b8###] reveal that LR-FHSS has a significantly higher capacity than the LoRa modulation. Furthermore, the LR-FHSS-enabled DtS network can offer connectivity services to numerous applications in remote areas, i.e., autonomous ships in the deep sea and smart farming in deserts [9 ###reference_b9###].\nDespite the feasibility of DtS IoT, the limited energy resources at the terrestrial and space segment are one of the key challenges to satellite IoT [10 ###reference_b10###]. The user devices envisioned for DtS IoT will be compact, low-cost, and powered by a battery that intends to last for years [9 ###reference_b9###, 11 ###reference_b11###]. Therefore, it is important to understand the energy consumption characteristic of terrestrial IoT devices. The energy consumption of satellite IoT technology has not been studied extensively. Only a few studies examine the energy efficiency of IoT devices. In [12 ###reference_b12###], the authors model and evaluate the energy performance of Iridium satellite IoT devices. The current consumption is measured using a RockBLOCK Mk2 device with an Iridium 9602 module and supports satellite IoT services. According to their findings, an Iridium IoT device powered by a 2400 mAh battery can maintain operations for an estimated 43.8 days by transmitting a 12 bytes message every 10 minutes. The works in [13 ###reference_b13###, 14 ###reference_b14###] measure and model LoRa energy consumption for terrestrial networks. Specifically, in [13 ###reference_b13###], experiments have been conducted to measure the timing and current consumption of the different states (e.g., radio preparation, transmission, radio off, post-processing) involved in a LoRa transmission. The results also report the impact of LoRaWAN Data Rates (DR) settings on battery lifetime.\nIn this paper, we carry measurements using LR-FHSS-enabled devices to identify and define the operational states of the system, their corresponding timings, and current consumption. These measurements are the foundation for our work, culminating by developing analytical models that accurately estimate the current consumption and ToA of LR-FHSS transmissions. To the authors\u2019 knowledge, no previous study has specifically investigated the LR-FHSS current consumption characteristics. The existing LR-FHSS literature [3 ###reference_b3###, 6 ###reference_b6###, 8 ###reference_b8###, 9 ###reference_b9###, 11 ###reference_b11###, 15 ###reference_b15###, 5 ###reference_b5###] focuses primarily on radio propagation and network scalability theoretical aspects. Therefore, this paper aims to bridge this gap by providing empirical insights into the current consumption and ToA of LR-FHSS, complementing the existing theoretical understanding of this technology. 
The obtained results and developed models enable a more accurate study and characterization of the LR-FHSS devices\u2019 performance using analytical methods and simulators. Our work can allow us to assess the feasibility of using this technology for different practical use cases with heterogeneous\nconnectivity needs, especially the satellite IoT."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "1.2",
|
| 19 |
+
"parent_section_id": "1",
|
| 20 |
+
"section_name": "Our contributions",
|
| 21 |
+
"text": "The major contributions of this paper are as follows:\nwe conduct an extensive state-of-the-art review and present the up-to-the-date background and technical details of LR-FHSS operations. Specifically, we accumulate the information from our experimental measurements, state-of-the-art publications [3 ###reference_b3###, 6 ###reference_b6###, 8 ###reference_b8###, 9 ###reference_b9###, 11 ###reference_b11###, 5 ###reference_b5###], LoRaWAN regional parameters document [16 ###reference_b16###], and LR-FHSS device specifications [4 ###reference_b4###, 7 ###reference_b7###] and the software [17 ###reference_b17###].\nwe conduct measurements using the LR-FHSS-enabled LR1120 development kits [4 ###reference_b4###]. The measurement results from the DC Power Analyzer illustrate the key system modes the device goes through for transmitting an LR-FHSS packet. Furthermore, we examine how payload, transmit power, and LR-FHSS-based DR configurations influence these modes\u2019 timings and current consumption.\nwe leverage measurement results to develop a more accurate analytical ToA model. Our analytical model closely aligns with the measurements, demonstrating a relative error lower than 0.3%.\nwe introduce an analytical model that estimates the current consumption of LR-FHSS. To the author\u2019s knowledge, our study is the first to present a current consumption model for LR-FHSS technology. We use the proposed models to theoretically estimate the battery lifetime of an LR-FHSS-enabled device.\nwe make the measurement data and the script code used for data processing publicly available at GitHub [to be added later], allowing readers and researchers to access and utilize them for future studies and investigations."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "1.3",
|
| 25 |
+
"parent_section_id": "1",
|
| 26 |
+
"section_name": "Paper outline",
|
| 27 |
+
"text": "The rest of this paper is organized as follows.\nSection II ###reference_### presents the background of LR-FHSS technology and the LR1120 development kit.\nSection III ###reference_### discusses the experiment setup.\nSelected measurement results are presented in Section IV ###reference_###.\nThe development of ToA and current consumption analytical models are detailed in Section V ###reference_###.\nThe key results derived from the analytical models and their comparison with our measurement results are discussed in Section VI ###reference_###.\nFinally, Section VII ###reference_### concludes this work with final remarks."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "II Technical background",
|
| 33 |
+
"text": ""
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "2.1",
|
| 37 |
+
"parent_section_id": "2",
|
| 38 |
+
"section_name": "II-A LR-FHSS",
|
| 39 |
+
"text": ""
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "2.1.1",
|
| 43 |
+
"parent_section_id": "2.1",
|
| 44 |
+
"section_name": "II-A1 Physical layer",
|
| 45 |
+
"text": "The new LoRaWAN DRs (DRs 5-6 for the United States (US) and DRs 8-11 for the European Union (EU) region) feature LR-FHSS modulation [3 ###reference_b3###, 6 ###reference_b6###]. LR-FHSS is based on Gaussian minimum-shift keying (GMSK) modulation, which uses bandwidth time (BT) products to define the pulse shapes.\nCurrently, it features BT = 1; however, the other values of BT are reserved for future use [7 ###reference_b7###]. BT = 1 offers an instantaneous bit rate 488.28125 bit per second (bps) [4 ###reference_b4###], including useful data as well as overhead, e.g., preamble, coding redundant, and cyclic redundancy check (CRC) bits. Each coded bit (i.e., after forward error correction (FEC)) has a duration of approximately 2 ms, as explained in [8 ###reference_b8###]. To enhance network capacity and minimize collisions, LR-FHSS uses intra-packet frequency hopping. Besides the improved capacity, the high-frequency drift tolerance of 300 Hz/s (subject to 1.5 dB sensitivity degradation) makes LR-FHSS a prominent technology for DtS scenarios. A minimum signal-to-noise ratio (SNR) of 3.96 dB is required for successful LR-FHSS signal decoding [15 ###reference_b15###]. With an occupied bandwidth (OBW) channel of 488 Hz, LR-FHSS offers a comparable link budget to LoRa DR0 operating at a 125 kHz bandwidth while delivering 200 times greater capacity. It features frequency hopping, low code rates, and header diversity, resulting in improved network scalability compared with conventional LoRa modulation. Table LABEL:tab:tab1 compares the key characteristics of LR-FHSS and LoRa [18 ###reference_b18###].\nCapacity in million (M) uplink packets per day per gateway for a 1.5 MHz channel bandwidth i.e., operating channel width (OCW) [4 ###reference_b4###].\nLR-FHSS uses convolution encoding to perform forward error detection and correction. Specifically, the header implies a fixed CR= while payload data fragments can use four code rate options CR=. However, current LoRaWAN specifications only support two code rates and reserving the remaining two for future use, as mentioned in the LR1120 user manual [7 ###reference_b7###]. Specifically, in ETSI region, DR8/DR10 and DR9/DR11 imply coding with the code rate equivalent to and , respectively. Similarly, in FCC operational areas, DR5 and DR6 imply coding with the code rate equivalent to and , respectively. Notably, the lower code rate improves the gateway\u2019s ability to correctly demodulate the packet in the presence of noise and interference."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "2.1.2",
|
| 49 |
+
"parent_section_id": "2.1",
|
| 50 |
+
"section_name": "II-A2 Frame structure",
|
| 51 |
+
"text": "Fig. 1 ###reference_### illustrates the LR-FHSS frame structure. LR-FHSS divides the payload into small fragments and transmits over multiple OBW channels following the frequency hopping sequence picked by a pseudo-random number generator. The packet header contains essential information to notify the gateway about the frequencies and hopping sequence of the payload fragments [5 ###reference_b5###]. Unlike LoRa, the LR-FHSS device transmits several replicas of headers ( 1 \u20264), where the DR setting defines the number of replicas. A gateway should receive at least one of the transmitted headers to decode a packet successfully. However, header diversity improves immunity against co-channel interference, which costs longer ToA. In LoRaWAN protocol, DR5 (in FCC region) and DR8/DR10 (in ETSI region) imply 3, while DR6 (FCC) and DR9/DR11 (ETSI) feature 2 [3 ###reference_b3###, 6 ###reference_b6###].\nEach encoded header comprises 114 bits resulting in header duration as 233.472 ms. Contrary, each encoded payload (payload plus CRC) fragment has 50 bits containing 48 payload bits and 2 bits preamble in each hop [7 ###reference_b7###, 8 ###reference_b8###, 15 ###reference_b15###]. Therefore, each payload fragment is 102.4 ms long. However, the last payload fragment can be shorter.\n###figure_1###"
|
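The frame timings quoted above can be cross-checked with a few lines of Python; this is our illustration using only the constants given in the text (488.28125 bps bit rate, 114-bit headers, 50-bit fragments), not code from the paper or the dataset.

```python
# Sanity check of the LR-FHSS frame timings quoted above.
BIT_RATE_BPS = 488.28125              # instantaneous GMSK bit rate (BT = 1)

bit_ms = 1e3 / BIT_RATE_BPS           # one coded bit: ~2.048 ms
header_ms = 114 * bit_ms              # 114 encoded bits per header replica
fragment_ms = (2 + 48) * bit_ms       # 2-bit preamble + 48 payload bits

print(f"bit: {bit_ms:.3f} ms, header: {header_ms:.3f} ms, "
      f"fragment: {fragment_ms:.3f} ms")
# -> bit: 2.048 ms, header: 233.472 ms, fragment: 102.400 ms
```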
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "2.1.3",
|
| 55 |
+
"parent_section_id": "2.1",
|
| 56 |
+
"section_name": "II-A3 Channels and Frequency hopping",
|
| 57 |
+
"text": "LR-FHSS provides a range of configuration options for channel bandwidths, hopping grid size, and minimum frequency separation. The whole frequency band is divided into multiple operating channel width (OCW) channels, allowing the cumulative bandwidth to span from 39.06 kHz to 1.5742 MHz, depending on the local frequency regulations [4 ###reference_b4###]. For frequency hopping, a single LR-FHSS OCW channel is further divided into multiple subchannels with a bandwidth of 488 Hz, named OBW channels. At any moment, a single device can transmit its data (header or fragment) only in one OBW channel, also known as a hop. Note that multiple OBW channels are required to transmit the full LR-FHSS packet. Fig.2 ###reference_### depicts the transmission of 15 bytes payload implying , header replica 3 resulting in number of payload fragments 9.\n###figure_2###"
|
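The hop count of the Fig. 2 example can be reproduced as follows; a minimal sketch assuming the fragmentation rule described above (payload plus 2-byte CRC, FEC-coded, 48 coded bits per hop).

```python
import math

def fragments(payload_bytes, code_rate):
    # Payload + 2-byte CRC, expanded by the FEC code rate,
    # then split into hops of 48 coded bits each.
    coded_bits = (payload_bytes + 2) * 8 / code_rate
    return math.ceil(coded_bits / 48)

n_f = fragments(15, 1 / 3)      # DR8 example of Fig. 2
print(n_f, 3 + n_f)             # -> 9 fragments, 12 hops with N_H = 3
```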
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "2.1.4",
|
| 61 |
+
"parent_section_id": "2.1",
|
| 62 |
+
"section_name": "II-A4 Existing Time-on-Air models",
|
| 63 |
+
"text": "The existing LR-FHSS ToA models are based primarily on LoRaWAN regional specification document [16 ###reference_b16###]."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "2.1.x",
|
| 67 |
+
"parent_section_id": "2.1",
|
| 68 |
+
"section_name": "Model I",
|
| 69 |
+
"text": "Departing from the information in [6 ###reference_b6###], the ToA of an LR-FHSS packet for payload bytes is expressed as\nwhere equals 2 and 4 for DR8/DR10 and\nDR9/DR11, respectively. This model implies all the payload fragments have the same duration."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "2.1.x",
|
| 73 |
+
"parent_section_id": "2.1",
|
| 74 |
+
"section_name": "Model II",
|
| 75 |
+
"text": "Unlike the previous model, the work in [8 ###reference_b8###, 15 ###reference_b15###] accounts for the fact that the last fragment could be shorter than standard 102.4 ms. Following this model, the ToA can be calculated as\nwhere is the number of payload (data payload plus CRC) fragments after FEC coding. Since we know each hop can accommodate 48 bits (6 bytes), thus, becomes\nwhere 2 bytes accounts for CRC."
|
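For reference, both models can be sketched in Python as below. The 233.472 ms header and 102.4 ms fragment durations come from Section II-A2; the (L + 3) rounding in Model I and the preamble handling in the last fragment of Model II follow our reading of the source equations and should be treated as assumptions.

```python
import math

T_H, T_F = 233.472, 102.4            # header / full-fragment duration (ms)
T_BIT = 1e3 / 488.28125              # coded-bit duration (ms)

def toa_model_1(L, n_h, m):
    """Model I: every fragment lasts a full T_F.
    m = payload bytes per fragment (2 for DR8/DR10, 4 for DR9/DR11)."""
    return n_h * T_H + math.ceil((L + 3) / m) * T_F

def toa_model_2(L, n_h, cr):
    """Model II: the last fragment may be shorter than T_F."""
    coded = (L + 2) * 8 / cr                       # payload + CRC after FEC
    n_f = math.ceil(coded / 48)                    # 48 coded bits per hop
    t_last = (coded - 48 * (n_f - 1) + 2) * T_BIT  # assumed 2-bit preamble
    return n_h * T_H + (n_f - 1) * T_F + t_last

print(toa_model_1(10, 3, 2))       # DR8, 10-byte payload -> ~1417.2 ms
print(toa_model_2(10, 3, 1 / 3))   # DR8, 10-byte payload -> ~1314.8 ms
```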
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "2.2",
|
| 79 |
+
"parent_section_id": "2",
|
| 80 |
+
"section_name": "II-B LR1120 development kit",
|
| 81 |
+
"text": "The LR1120 is the first commercially available development kit featuring an LR-FHSS modulator. This real transceiver allows us to get insight into the LR-FHSS operation, system modes, their duration, and associated current consumption. Therefore, in what follows, we briefly introduce the LR1120 modem and respective development kit, which we will further use in our experimental campaign."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "2.2.1",
|
| 85 |
+
"parent_section_id": "2.2",
|
| 86 |
+
"section_name": "II-B1 Power amplifiers",
|
| 87 |
+
"text": "The LR1120 device includes a low-power amplifier (LPA) and high-power amplifier (HPA) to facilitate its sub-GHz operations. The selection between these amplifiers depends on the configured transmit power (). The LPA is primarily optimized for below +14 dBm, while it can also support +15 dBm. In contrast, the HPA is specifically designed for generating up to +22 dBm [7 ###reference_b7###]."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "2.2.2",
|
| 91 |
+
"parent_section_id": "2.2",
|
| 92 |
+
"section_name": "II-B2 System modes",
|
| 93 |
+
"text": "Fig. 3 ###reference_### illustrates the LR1120 operational modes. The power down mode has the lowest power consumption since it stops all clocks and does not retain any data. Similarly, the sleep mode offers a low power consumption option. However, it retains the configuration register values and stores the firmware data in random-access memory (RAM).\n###figure_3### .\nUpon startup (S_Startup), the bootloader performs firmware validation before transitioning to the standby state (S_STBY_RC), the default mode of LR1120. The standby modes, S_STBY_RC and S_STBY_XOSC, differ in clock oscillator selection, with the former utilizing the internal Resistance-Capacitance (RC) oscillator at 32 MHz. At the same time, the latter employs an external crystal clock, allowing faster transitions to other modes. To transmit a packet, the device leaves the standby mode and enters frequency synthesis (S_FS) mode to activate the Phase-Locked Loop and associated regulators.\nThe LR1120 user manual (see Figure 2-1 in [7 ###reference_b7###]) does not present a radio preparation mode. However, our measurements indicate that the device enters a radio preparation (S_Radio_Prepare) state for around 100 milliseconds before transmission (S_TX) mode. Next, in S_TX mode, the LR1120 transmits the LR-FHSS packet at the configured carrier frequency and DR using the relevant power amplifier to achieve the desired output power. After completing the transmission, the device enters a radio off (S_Radio_Off) mode, deactivating the power amplifier and regulators. It\u2019s worth noting that this mode is not documented in the LR1120 state diagram (see Figure 2-1 in [7 ###reference_b7###]). Finally, the device reverts to the default standby mode (S_STBY_RC). However, the code we use for testing specifically configures the device to enter sleep (S_Sleep) mode and remain there until the next transmission."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "3",
|
| 97 |
+
"parent_section_id": null,
|
| 98 |
+
"section_name": "III Experimental Setup",
|
| 99 |
+
"text": "###figure_4###"
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "3.1",
|
| 103 |
+
"parent_section_id": "3",
|
| 104 |
+
"section_name": "III-A Hardware",
|
| 105 |
+
"text": "This work uses LR1120 development kits as a reference LR-FHSS transmitter for our measurement [7 ###reference_b7###]. Notably, our current consumption and ToA model will remain valid for other LR-FHSS devices. However, different power amplifier configurations could impact the current consumption in transmission (S_TX) mode. Any such differences can be incorporated into our model through minor adjustments, which can be done later depending on the availability of LR-FHSS new device models.\nFig. 4 ###reference_### shows the structural diagram of our experimental setup. One can see that LR-FHSS-enabled LR1120 is further connected to the ST Microelectronic NUCLEO-L476RG, which serves as a host Microcontroller Unit (MCU). We installed software on the host MCU responsible for programming the LR1120. MCU and LR1120 radio communication based on Serial Peripheral Interface (SPI) [19 ###reference_b19###]. For measuring the current consumption, duration of each operation state, and ToA, we utilized the Agilent N6705B DC Power Analyzer. Specifically, we performed the following steps:\nTo ensure accurate measurements and minimize energy consumption and fluctuations, we removed the light-emitting diode (LED) (LD1) from the MCU. Subsequently, we mounted the LR1120 onto the MCU. We established a connection between the MCU and a computer to facilitate the installation of the LR-FHSS software and overall control over the experiment.\nNext, to monitor the current consumption of the LR-FHSS-enabled LR1120, we removed the VDD_RADIO jumper and directly connected VDD_RADIO to the positive output (red wire in Fig. 4 ###reference_###) of a DC Power Analyzer. Simultaneously, we used the MCU and LR1120 radio ground (GND) as a common reference and connected it to the negative output (green wire in Fig. 4 ###reference_###) of the DC Power Analyzer. We utilized a 3.3 V DC supply for the measurements, with a maximum current limit of 180 mA current and a sampling period of 20.48 microseconds (s). The MCU was directly powered from a computer using a mini USB connector cable. This setup exclusively enables the current consumption and timing measurement of different LR-FHSS modes without considering the current consumption of the host MCU.\nIn the third step, we placed the testbed inside a dbSAFE box to ensure RF isolation.\nFinally, we connected and configured the Nooelec RTL-Software Defined Radio (SDR) to a computer, enabling us to observe and visualize the frequency spectrum of the LR-FHSS transmissions."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "3.2",
|
| 109 |
+
"parent_section_id": "3",
|
| 110 |
+
"section_name": "III-B Software",
|
| 111 |
+
"text": "We program the MCU using the LR11xx radio drivers published by Semtech on GitHub [17 ###reference_b17###]. It\u2019s important to note that this driver does not implement LR-FHSS reception. We use the Keil IDE to introduce changes and build the software, specifically implementing three modifications in the code:\nFirstly, in LR1110_lr_fhss_ping.c file, we disabled the TX and RX LEDs of LR1120 to prevent measurement fluctuations and additional current consumption.\nSecondly, by introducing changes to the code, we configure the radio to transit into sleep mode after transmitting a packet, even though the default mode was standby mode. Specifically, we modify the enter_standby_then_sleep_mode() function in the LR1110_lr_fhss_ping.c file.\nThird, we update the LR1110_lr_fhss_ping.c file to enable the following three measurement scenarios\n10 bytes constant payload size and variable ranging from 0 to 22 dBm;\nconstant 14 dBm and variable payload 10 to 65 bytes;\nrepeating the measurements above for DR8 and DR9.\nLastly, we developed a MATLAB script for processing the collected data with the following functionalities:\nallows identification of different transition states involved in LR-FHSS transmission,\ncalculates duration and current consumption for each state, and\ntotal ToA calculation of transmission (S_TX) mode."
|
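As a rough illustration of what such a processing script does (the paper's script is MATLAB and is not reproduced here), a threshold-based segmentation of the sampled current trace could look as follows; the threshold and the synthetic trace are placeholders.

```python
import numpy as np

FS = 1 / 20.48e-6          # power analyzer sampling rate (samples per second)

def segment_states(current_ma, threshold_ma=1.0):
    # Split the trace into contiguous runs above/below the threshold and
    # report (start_s, duration_s, mean_mA) for each run. Real traces
    # need per-state thresholds rather than a single cut-off.
    active = current_ma > threshold_ma
    edges = np.flatnonzero(np.diff(active.astype(int))) + 1
    bounds = np.concatenate(([0], edges, [len(current_ma)]))
    return [(bounds[i] / FS, (bounds[i + 1] - bounds[i]) / FS,
             float(current_ma[bounds[i]:bounds[i + 1]].mean()))
            for i in range(len(bounds) - 1)]

t = np.arange(0, 0.01, 20.48e-6)
demo = np.where(t < 0.005, 0.05, 3.0)       # synthetic sleep -> active step
print(segment_states(demo))
```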
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "4",
|
| 115 |
+
"parent_section_id": null,
|
| 116 |
+
"section_name": "IV Measurement results",
|
| 117 |
+
"text": "A series of test measurements were initially conducted to ensure the accuracy and reliability of the hardware and software configuration. Fig. 5 ###reference_### depicts the LR-FHSS packet spectrum captured by CubicSDR software, visually representing the intra-packet frequency hopping. This validation step confirms the proper functioning and appropriateness of the implemented hardware and software setup.\n###figure_5### We perform several measurements to characterize LR-FHSS average current consumption and ToA. To accomplish the former, we conduct twenty measurements under varying levels, ranging from 0 dBm to 22 dBm, specifically for DR8 and DR9. To develop a realistic ToA model, we carry out measurements across a range of variable payload sizes, spanning from 10 to 65 bytes, for both LR-FHSS DRs, i.e., DR8 and DR9. This accounts for different code rates and header diversity configurations of the DRs.\n###figure_6### Fig. 6 ###reference_### shows the current consumption for just one out of twenty experimental rounds. One can see that the current consumption increases significantly after 14 dBm when the power amplifier switches from LPA to HPA. Fig. 7 ###reference_### illustrates the current consumption profile of a single LR-FHSS transmission. The transmission undergoes different states, as mentioned in Section II-B ###reference_###. Each state within the transmission process reveals distinct characteristics regarding duration and associated current consumption. Table II ###reference_### summarizes these states, their duration, and the average current consumption computed over all 20 experiment runs.\n###figure_7### Specifically, in Fig. 7 ###reference_###, one can see that initially, the device is in sleep mode, which has the lowest current consumption of 0.053 mA. To transmit a packet, the device wakes up (State 1) and enters the default standby mode (State 2). Notably, the duration of this state depends on the payload, as reported in Fig. 8 ###reference_###. However, it is worth mentioning that the average current consumption in the standby state remains constant 1.229 mA, irrespective of the payload size, DRs, and levels. Subsequently, the device engages in frequency synthesis named State 3. Similar to the standby mode (State 2), the duration of frequency synthesis mode depends on the payload size as shown in Fig. 8 ###reference_###.\n###table_1### ###figure_8### In the next phase, the LR1120 device prepares the radio packet for transmission. Our measurement results reveal that the duration and current consumption of the radio preparation mode (State 4) remains consistent around 99.67 ms 2.968 mA across all configurations.\nFollowing it, the device initiates the packet transmission (State 5), which has the highest current consumption compared to other states. The current consumption is directly influenced by the selected transmit power level.\nThe duration of the transmission state corresponds to the packet ToA, which is influenced by the payload size and the chosen DRs. From the measurements, we identify LR-FHSS exhibits an average transition time 0.61 ms to switch the OBW channel for the next hop, as illustrated in Fig. 9 ###reference_###. However, it is important to note that the total transition time experienced during packet transmission depends on the number of header replicas and payload fragments. 
To accurately model the LR-FHSS ToA, it is crucial to consider the total transition time.\nUpon completing the transmission, the device transitions to the radio off mode (State 6) and returns to the default standby mode (State 7). Unlike the initial standby mode (State 2), the duration of the final standby state remains consistent across different configurations and payload sizes. Finally, the device returns to the sleep (State 8). Unlike LoRa, LR-FHSS is designed to support uplink transmissions only. Therefore, in our experiments, the receiving windows were disabled, and our measurement does not account for the current consumption in the receive mode. We focus on analyzing and modeling the current consumption during the transmit process of LR-FHSS modulation. Notably, the LR-FHSS-enabled devices can use LoRa modulation for downlink communication. Therefore, existing LoRa ToA and energy consumption models [13 ###reference_b13###, 14 ###reference_b14###] can be used for the downlink analysis.\n###figure_9###"
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "5",
|
| 121 |
+
"parent_section_id": null,
|
| 122 |
+
"section_name": "Proposed analytical Models",
|
| 123 |
+
"text": ""
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "5.1",
|
| 127 |
+
"parent_section_id": "5",
|
| 128 |
+
"section_name": "LR-FHSS Time-on-Air",
|
| 129 |
+
"text": "Our research is grounded in real LR-FHSS measurement results presented in Section IV ###reference_### and references to Semtech documents [4 ###reference_b4###] and the LR1120 software [7 ###reference_b7###]. We leverage this information to develop an accurate analytical model for LR-FHSS ToA. We consider ToA as the total time required to transmit all the encoded bits, including header, payload, preambles, and CRC at a given bit rate . Additionally, we account for the transition time required to switch the OBW channels during frequency hopping as illustrated in Fig. 9 ###reference_###. The proposed model takes into account these facts to calculate the total ToA as\nwhere is the number of transitions to change OBW channels for intra-packet frequency hopping, and is the total number of encoded bits in an LR-FHSS packet calculated as\nwhere represents the total number of encoded payload bits in the LR-FHSS packet, including the preamble ( 2 bits) for each payload fragment as\nwhere denotes the number of payloads plus CRC bits after the FEC coding, i.e.,\nwhere is the number of uncoded physical layer payload in bytes, and 6 denotes the number of overhead bits defined in LR-FHSS software [17 ###reference_b17###]. Finally, the number of payload fragments in an LR-FHSS packet is given by"
|
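A compact numerical sketch of the proposed model, as reconstructed above, is given below. The transition count n_trans = N_H + N_F - 1 (one hop change between consecutive headers and fragments) is our assumption; T_t = 0.61 ms is the measured transition time from Section IV.

```python
import math

R_B = 488.28125                 # instantaneous bit rate (bit/s)
T_T = 0.61e-3                   # measured OBW transition time (s)

def toa_proposed(L, n_h, cr):
    n_coded = ((L + 2) * 8 + 6) / cr     # payload + CRC + overhead, FEC-coded
    n_frag = math.ceil(n_coded / 48)     # 48 coded bits per hop
    n_pl_bits = n_coded + 2 * n_frag     # add a 2-bit preamble per fragment
    n_bits = 114 * n_h + n_pl_bits       # add the encoded header replicas
    n_trans = n_h + n_frag - 1           # assumed number of hop changes
    return n_bits / R_B + n_trans * T_T  # seconds

print(f"DR8, 10 B: {toa_proposed(10, 3, 1/3) * 1e3:.1f} ms")   # ~1361 ms
print(f"DR9, 10 B: {toa_proposed(10, 2, 2/3) * 1e3:.1f} ms")   # ~800 ms
```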
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"section_id": "5.2",
|
| 133 |
+
"parent_section_id": "5",
|
| 134 |
+
"section_name": "LR-FHSS Current Consumption",
|
| 135 |
+
"text": "LR-FHSS uses multiple OBW channels for intra-packet frequency hopping during a single transmission. Each transition between OBW channels, known as a hopping shift, requires a transition time (). It is important to note that the current consumption undergoes a substantial reduction during these transitions, reaching as low as 4.94 mA, as demonstrated in Fig. 9 ###reference_###. We account for this aspect in our work to accurately model the current consumption behavior of LR-FHSS transmissions.\nLet 8 be the number of states, and represent the duration and current consumption of these states as given in Table II ###reference_###. Thus, the total average current consumption becomes\nwhere account for the total drop in current consumption during the OBW channel changes for intra-packet frequency hopping as\nwhere is the average current consumption during the transition time . In Fig. 9 ###reference_###, the drop in current from to and then raise in current from to exhibit a triangle shape. Using the triangle centroid formula, the equation for the average current consumption can be expressed as\nIn (9 ###reference_###) and (10 ###reference_###), is the reporting period, the time between two consecutive periodic transmissions calculated as\nwhere represents the total duration of all active states combined.\nNext, these results can be used to estimate, e.g., a theoretical lifetime of operation from the battery. For example, if to imply a linear battery model, the lifetime is given by:\nwhere is battery capability expressed in mAh. Note that (15 ###reference_###) estimates the maximum lifespan of an ideal battery; however, real-world batteries degrade and experience a decline in their rated capacity over time."
|
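The sketch below wires these equations together with the Table II values. The transmit-state current i_tx, the durations of the payload-dependent states (standby, frequency synthesis), and the ToA t_tx are placeholders to be taken from the measurements (Fig. 6, Fig. 8, and the ToA model above).

```python
T_T, I_DIP = 0.61e-3, 4.94          # hop transition time (s), dip current (mA)
I_SLEEP = 0.053                     # sleep-mode current (mA), Table II

def avg_current_ma(states, i_tx, t_tx, n_trans, t_report):
    # I_avg = (sum_i T_i * I_i - Delta_I) / T_R, with the triangle-centroid
    # average (2 * i_tx + I_DIP) / 3 during each hop transition.
    charge = sum(t * i for t, i in states) + t_tx * i_tx       # mA*s
    i_bar = (2 * i_tx + I_DIP) / 3
    charge -= n_trans * T_T * (i_tx - i_bar)                   # hop dips
    t_active = sum(t for t, _ in states) + t_tx
    charge += (t_report - t_active) * I_SLEEP                  # sleep remainder
    return charge / t_report

# (duration_s, current_mA): wake-up, standby*, FS*, radio prep., radio off,
# standby; entries marked * are payload-dependent placeholders.
states = [(0.43e-3, 1.9), (0.12, 1.229), (5e-3, 3.7392),
          (99.67e-3, 2.968), (9.45e-3, 4.94), (1.044e-3, 1.229)]
i_avg = avg_current_ma(states, i_tx=45.0, t_tx=1.36, n_trans=9,
                       t_report=15 * 60)
print(f"I_avg = {i_avg:.4f} mA -> {2400 / i_avg / 8760:.2f} years on 2400 mAh")
```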
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"section_id": "6",
|
| 139 |
+
"parent_section_id": null,
|
| 140 |
+
"section_name": "VI Discussion",
|
| 141 |
+
"text": "Fig 10 ###reference_### shows the LR-FHSS ToA as a function of variable payload. The ToA values obtained from our proposed analytical model closely align with the experimental measurements, confirming the validity of our model. However, the existing state-of-the-art Model I [16 ###reference_b16###] reports up to 55 ms higher ToA than the results obtained from our analytical model and measured data. Conversely, Model II [8 ###reference_b8###, 15 ###reference_b15###] gives a ToA approximately 47 ms lower.\nThe lower section of Fig. 10 ###reference_### reveals the relative error of ToA from different models relative to the measurement results. One can see that the proposed analytical ToA model features the lowest relative error, which is only 0.3%. For 10 bytes payload, Model I and Model II demonstrate the maximum error of -9.2% and 3.4%, respectively. These differences and fluctuations are mainly due to the mathematical formulation of (1 ###reference_###) and (2 ###reference_###), specifically, the Model I depicts sawtooth behavior because (1 ###reference_###) does not account for the fact that the last payload fragment could be shorter than 102.4 ms. Thus, this model introduces significant inaccuracy. Notably, the relative maximum error gradually decreases with the increase of the payload size for all the models. Our proposed approach reveals the best performance among these models, demonstrating the smallest relative error as we account for transition time .\n###figure_10### In Fig. 11 ###reference_###, we compare the average current consumption during one packet transmission obtained from the measurement and our analytical model as a function of variable transmit power. The results reveal a strong correlation, thereby confirming the accuracy of our LR-FHSS current consumption model. To our knowledge, no available research or literature presents a current consumption model for LR-FHSS. Therefore, we can not compare our results to any other model.\n###figure_11### ###figure_12### Finally, we theoretically evaluate the battery life of an LR-FHSS-enabled device. In the upper chart of Fig. 12 ###reference_###, we present the theoretical battery life as a function of variable transmission power. We imply the device transmits 10-byte packets every 15 minutes. DR9 demonstrates a better lifetime owing to lower ToA. For both DRs, the battery lifetimes remain above 3.6 years up to a transmit power of 14 dBm. However, when the transmit power exceeds 14 dBm, and the device begins operating on HPA, the battery life gradually decreases to almost half - nearly 2.2 and 1.58 years for DR8 and DR9, respectively, at the maximum transmit power.\nMore than this, the lower chart of Fig.12 ###reference_### illustrates how the battery lifespan changes based on the notification time (). As expected, our results show that longer increases battery life. Specifically, when using 5-minute intervals, the battery remains operational for approximately 2.3 and 3 years for DR8 and DR9, respectively. However, with a 30-minute notification interval, the battery\u2019s lifespan exceeds 4.2 years.\nAt the time of our experiments, only LR1120 model development kits were commercially available. As discussed in Section II-B ###reference_### LR1120 development kit and confirmed by our measurement results in Fig. 6 ###reference_### and Fig. 7 ###reference_###, the time duration and current consumption change across different operational states. 
The current consumption in the transmission (S_TX) mode may exhibit variations in future LR-FHSS models, particularly if they incorporate different power amplifiers and radio designs. These possible variations can be easily added to our model with slight modifications. LR1121 [20 ###reference_b20###] is a brand-new LR-FHSS device, it exhibits the same transmit mode power consumption characteristics as LR1120 (refers to Table 3-6: Transmit Mode Power Consumption of LR1120 in [21 ###reference_b21###] and LR1121 in [20 ###reference_b20###]). We expect our ToA model to remain valid for all the LR-FHSS devices, regardless of potential variations in current consumption."
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"section_id": "7",
|
| 145 |
+
"parent_section_id": null,
|
| 146 |
+
"section_name": "VII Conclusion",
|
| 147 |
+
"text": "LR-FHSS is emerging as a potential connectivity solution for battery-powered and tiny satellite IoT devices operating in hard-to-reach areas. Frequently replacing a device\u2019s battery in remote industrial or fragile natural areas, e.g., offshore wind farms to give just one illustrative example, is challenging and costly. The study of the LR-FHSS time and energy profiles provides important insights for further communication and application operation optimizations; it is also crucial for understanding this technology\u2019s feasibility and potential performance for practical applications and use cases.\nThe main contributions of this paper are the LR-FHSS current consumption and the ToA analytical model based on real experimental measurements. First, we conduct extensive measurements for variable transmit power, Date Rates, and message payload size. The results offer valuable insights into the various system states associated with LR-FHSS transmissions, including their respective timing and current consumption characteristics. Importantly, this study identifies additional states, such as Radio Preparation and Radio off, which were not previously reported in the LR-FHSS-enabled LR1120 development kit user manual.\nNotably, our proposed ToA model demonstrates better accuracy than existing state-of-the-art models. Similarly, this work introduces the first analytical current consumption model for LR-FHSS. More than this, our paper discusses the average current consumption and battery lifetime as a function of variable transmit power and the transmission notification time. Our models and results can be useful for further studies investigating the medium access control aspects and battery lifetime of LR-FHSS-enabled devices. They enable us to study the feasibility and performance of the applications (e.g., industrial use cases) based on them. In future, we aim to expand our experimental measurements to examine the current and ToA characteristics of forthcoming LR-FHSS device models. Also, we consider comparing the current consumption and battery life of LoRa, NB-IoT, and RedCap technology as one research direction."
|
| 148 |
+
}
|
| 149 |
+
],
|
| 150 |
+
"appendix": [],
|
| 151 |
+
"tables": {
|
| 152 |
+
"1": {
|
| 153 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S2.T1\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">TABLE I: </span>Comparison of LoRa and LR-FHSS key features.</figcaption><div class=\"ltx_flex_figure\">\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<table class=\"ltx_tabular ltx_centering ltx_figure_panel ltx_align_middle\" id=\"S2.T1.6\">\n<tr class=\"ltx_tr\" id=\"S2.T1.6.7\">\n<td class=\"ltx_td ltx_border_tt\" id=\"S2.T1.6.7.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T1.6.7.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.6.7.2.1\">LoRa</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T1.6.7.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.6.7.3.1\">LR-FHSS</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.6.8\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.6.8.1\">Modulation</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.6.8.2\">CSS</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.6.8.3\">GMSK</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.2.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.2.3\">Code rate</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.1.1.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.2.2\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.6.9\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.6.9.1\">Daily Maximum Capacity <sup class=\"ltx_sup\" id=\"S2.T1.6.9.1.1\">1</sup>\n</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.6.9.2\">1.2M uplinks/GW</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.6.9.3\">11M uplinks/GW</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.6.10\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.6.10.1\">Frequency Drift Tolerance</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.6.10.2\">120 Hz/s</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.6.10.3\">300 Hz/s</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.6.11\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.6.11.1\">Uplink</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.6.11.2\">\u2713</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.6.11.3\">\u2713</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.3.3\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.3.3.2\">Downlink</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.3.3.3\">\u2713</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.3.3.1\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.4.4\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.4.4.2\">Spreading factors</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.4.4.3\">\u2713</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.4.4.1\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.5.5\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.5.5.2\">Header Diversity</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.5.5.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.5.5.3\">\u2713</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.6.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S2.T1.6.6.2\">Intra-packet hopping</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S2.T1.6.6.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S2.T1.6.6.3\">\u2713</td>\n</tr>\n</table>\n</div>\n<div class=\"ltx_flex_break\"></div>\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<ul class=\"ltx_itemize ltx_centering ltx_figure_panel\" id=\"S2.I1\">\n<li class=\"ltx_item\" id=\"S2.I1.ix1\" 
style=\"list-style-type:none;\">\n<span class=\"ltx_tag ltx_tag_item\">1</span>\n<div class=\"ltx_para\" id=\"S2.I1.ix1.p1\">\n<p class=\"ltx_p\" id=\"S2.I1.ix1.p1.1\">Capacity in million (M) uplink packets per day per gateway for a 1.5\u00a0MHz channel bandwidth i.e., operating channel width (OCW)\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09954v1#bib.bib4\" title=\"\">4 ###reference_b4###</a>]</cite>.</p>\n</div>\n</li>\n</ul>\n</div>\n<div class=\"ltx_flex_break\"></div>\n</div>\n</figure>",
|
| 154 |
+
"capture": "TABLE I: Comparison of LoRa and LR-FHSS key features."
|
| 155 |
+
},
|
| 156 |
+
"2": {
|
| 157 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">TABLE II: </span>LR-FHSS states during packet transmission, their duration, and average current consumption</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S4.T2.19\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T2.2.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.2.2.3\">State</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T2.2.2.4\">State</td>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_t\" id=\"S4.T2.1.1.1\">\n<span class=\"ltx_text\" id=\"S4.T2.1.1.1.2\"></span> <span class=\"ltx_text\" id=\"S4.T2.1.1.1.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T2.1.1.1.1.1.1\">\n<span class=\"ltx_tr\" id=\"S4.T2.1.1.1.1.1.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T2.1.1.1.1.1.1.1.1\"><span class=\"ltx_text\" id=\"S4.T2.1.1.1.1.1.1.1.1.1\" style=\"font-size:80%;\">Duration </span></span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T2.1.1.1.3\"></span></th>\n<td class=\"ltx_td ltx_border_t\" id=\"S4.T2.2.2.5\"></td>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_t\" id=\"S4.T2.2.2.2\">\n<span class=\"ltx_text\" id=\"S4.T2.2.2.2.2\"></span> <span class=\"ltx_text\" id=\"S4.T2.2.2.2.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T2.2.2.2.1.1.1\">\n<span class=\"ltx_tr\" id=\"S4.T2.2.2.2.1.1.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T2.2.2.2.1.1.1.1.1\"><span class=\"ltx_text\" id=\"S4.T2.2.2.2.1.1.1.1.1.1\" style=\"font-size:80%;\">Current </span></span></span>\n</span></span><span class=\"ltx_text\" id=\"S4.T2.2.2.2.3\"></span></th>\n<td class=\"ltx_td ltx_border_t\" id=\"S4.T2.2.2.6\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.3.3\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.3.3.1\">number \n</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.3.3.2\">name</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T2.3.3.3\">sym</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.3.3.4\">ms</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T2.3.3.5\">sym</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.3.3.6\">mA</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.5.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.5.5.3\">1</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T2.5.5.4\">Wake up</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T2.4.4.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.5.5.5\">0.4301</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T2.5.5.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.5.5.6\">1.9</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.7.7\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.7.7.3\">2</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.7.7.4\">Standby</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.6.6.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.7.7.5\">Fig.\u00a0<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09954v1#S4.F8\" title=\"Figure 8 \u2023 IV Measurement results \u2023 Experiment-based Models for Air Time and Current Consumption of LoRaWAN LR-FHSS\"><span class=\"ltx_text ltx_ref_tag\">8</span></a>\n</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.7.7.2\"></td>\n<td class=\"ltx_td 
ltx_align_center\" id=\"S4.T2.7.7.6\">1.229</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.9.9\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.9.9.3\">3</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.9.9.4\">FS</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.8.8.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.9.9.5\">Fig.\u00a0<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09954v1#S4.F8\" title=\"Figure 8 \u2023 IV Measurement results \u2023 Experiment-based Models for Air Time and Current Consumption of LoRaWAN LR-FHSS\"><span class=\"ltx_text ltx_ref_tag\">8</span></a>\n</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.9.9.2\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.9.9.6\">3.7392</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.11.11\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.11.11.3\">4</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.11.11.4\">Radio Pre.</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.10.10.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.11.11.5\">99.67</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.11.11.2\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.11.11.6\">2.968</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.13.13\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.13.3\">5</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.13.13.4\">Transmission</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.12.12.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.13.5\">(<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09954v1#S5.E4\" title=\"In V-A LR-FHSS Time-on-Air \u2023 V Proposed analytical Models \u2023 Experiment-based Models for Air Time and Current Consumption of LoRaWAN LR-FHSS\"><span class=\"ltx_text ltx_ref_tag\">4</span></a>)</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.13.13.2\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.13.6\">Fig.\u00a0<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09954v1#S4.F6\" title=\"Figure 6 \u2023 IV Measurement results \u2023 Experiment-based Models for Air Time and Current Consumption of LoRaWAN LR-FHSS\"><span class=\"ltx_text ltx_ref_tag\">6</span></a>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.15.15\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.15.15.3\">6</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.15.15.4\">Radio off</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.14.14.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.15.15.5\">9.45</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.15.15.2\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.15.15.6\">4.94</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.17.17\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.17.17.3\">7</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.17.17.4\">Standby</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.16.16.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.17.17.5\">1.044</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T2.17.17.2\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.17.17.6\">1.229</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.19.19\">\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.19.19.3\">8</td>\n<td class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S4.T2.19.19.4\">Sleep</td>\n<td class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S4.T2.18.18.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.19.19.5\">(<a class=\"ltx_ref\" 
href=\"https://arxiv.org/html/2408.09954v1#S5.E13\" title=\"In V-B LR-FHSS Current Consumption \u2023 V Proposed analytical Models \u2023 Experiment-based Models for Air Time and Current Consumption of LoRaWAN LR-FHSS\"><span class=\"ltx_text ltx_ref_tag\">13</span></a>)</td>\n<td class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S4.T2.19.19.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.19.19.6\">0.053</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 158 |
+
"capture": "TABLE II: LR-FHSS states during packet transmission, their duration, and average current consumption"
|
| 159 |
+
}
|
| 160 |
+
},
|
| 161 |
+
"image_paths": {
|
| 162 |
+
"1": {
|
| 163 |
+
"figure_path": "2408.09954v1_figure_1.png",
|
| 164 |
+
"caption": "Figure 1: The key components of the LR-FHSS packet structure.",
|
| 165 |
+
"url": "http://arxiv.org/html/2408.09954v1/x1.png"
|
| 166 |
+
},
|
| 167 |
+
"2": {
|
| 168 |
+
"figure_path": "2408.09954v1_figure_2.png",
|
| 169 |
+
"caption": "Figure 2: Illustrative frequency hopping profile of a single LR-FHSS packet of payload L=15\ud835\udc3f15L=15italic_L = 15 bytes and DR8 featuring code rate =1313\\frac{1}{3}divide start_ARG 1 end_ARG start_ARG 3 end_ARG and NH=3subscript\ud835\udc41\ud835\udc3b3N_{H}=3italic_N start_POSTSUBSCRIPT italic_H end_POSTSUBSCRIPT = 3",
|
| 170 |
+
"url": "http://arxiv.org/html/2408.09954v1/x2.png"
|
| 171 |
+
},
|
| 172 |
+
"3": {
|
| 173 |
+
"figure_path": "2408.09954v1_figure_3.png",
|
| 174 |
+
"caption": "Figure 3: LR1120 operational modes, where the states marked by filled green circles are not reported in the user manual [7]. We identify these green states from experimental measurements and analyse the code of the transceiver\u2019s firmware, while the orange ones are irrelevant in this work. Notably, colour borders distinguish between the different states and build a relation with Fig. 7",
|
| 175 |
+
"url": "http://arxiv.org/html/2408.09954v1/x3.png"
|
| 176 |
+
},
|
| 177 |
+
"4": {
|
| 178 |
+
"figure_path": "2408.09954v1_figure_4.png",
|
| 179 |
+
"caption": "Figure 4: The structural diagram of the experimental setup for current consumption measurements of LR-FHSS transmissions.",
|
| 180 |
+
"url": "http://arxiv.org/html/2408.09954v1/x4.png"
|
| 181 |
+
},
|
| 182 |
+
"5": {
|
| 183 |
+
"figure_path": "2408.09954v1_figure_5.png",
|
| 184 |
+
"caption": "Figure 5: Frequency spectrum of LR-FHSS illustrating intra-packet frequency hopping profile of a single LR-FHSS packet of L=\ud835\udc3fabsentL=italic_L = 15-byte payload and DR8 featuring code rate =1313\\frac{1}{3}divide start_ARG 1 end_ARG start_ARG 3 end_ARG and NH=3subscript\ud835\udc41\ud835\udc3b3N_{H}=3italic_N start_POSTSUBSCRIPT italic_H end_POSTSUBSCRIPT = 3.",
|
| 185 |
+
"url": "http://arxiv.org/html/2408.09954v1/x5.png"
|
| 186 |
+
},
|
| 187 |
+
"6": {
|
| 188 |
+
"figure_path": "2408.09954v1_figure_6.png",
|
| 189 |
+
"caption": "Figure 6: The impact of variable transmit power on LR-FHSS radio current consumption for one experimental round. We have conducted twenty rounds of measurements, resulting in over 460 packet transmissions for each DR.",
|
| 190 |
+
"url": "http://arxiv.org/html/2408.09954v1/x6.png"
|
| 191 |
+
},
|
| 192 |
+
"7": {
|
| 193 |
+
"figure_path": "2408.09954v1_figure_7.png",
|
| 194 |
+
"caption": "Figure 7: LR-FHSS current consumption for an illustrative packet transmission with different states of transmissions color-marked according to the operational modes shown in Fig. 3.",
|
| 195 |
+
"url": "http://arxiv.org/html/2408.09954v1/x7.png"
|
| 196 |
+
},
|
| 197 |
+
"8": {
|
| 198 |
+
"figure_path": "2408.09954v1_figure_8.png",
|
| 199 |
+
"caption": "Figure 8: The impact of payload size on the timings of standby and frequency synthesis mode.",
|
| 200 |
+
"url": "http://arxiv.org/html/2408.09954v1/x8.png"
|
| 201 |
+
},
|
| 202 |
+
"9": {
|
| 203 |
+
"figure_path": "2408.09954v1_figure_9.png",
|
| 204 |
+
"caption": "Figure 9: The transition time between the hop change during the intra-packet frequency hopping.",
|
| 205 |
+
"url": "http://arxiv.org/html/2408.09954v1/x9.png"
|
| 206 |
+
},
|
| 207 |
+
"10": {
|
| 208 |
+
"figure_path": "2408.09954v1_figure_10.png",
|
| 209 |
+
"caption": "Figure 10: Top section of the figure compares the ToA results obtained from measurements, the proposed model, and the state-of-the-art models [16, 8, 15] as a function of variable payload. The bottom part of the figure reveals the error percentage when compared to the measurement ToA.",
|
| 210 |
+
"url": "http://arxiv.org/html/2408.09954v1/x10.png"
|
| 211 |
+
},
|
| 212 |
+
"11": {
|
| 213 |
+
"figure_path": "2408.09954v1_figure_11.png",
|
| 214 |
+
"caption": "Figure 11: Comparison of measured and analytical average current consumption (Iavg\u00af\u00afsubscript\ud835\udc3cavg\\overline{I_{\\text{avg}}}over\u00af start_ARG italic_I start_POSTSUBSCRIPT avg end_POSTSUBSCRIPT end_ARG) for single packet transmission as a function of transmit power (Pt\u2062xsubscript\ud835\udc43\ud835\udc61\ud835\udc65P_{tx}italic_P start_POSTSUBSCRIPT italic_t italic_x end_POSTSUBSCRIPT).",
|
| 215 |
+
"url": "http://arxiv.org/html/2408.09954v1/x11.png"
|
| 216 |
+
},
|
| 217 |
+
"12": {
|
| 218 |
+
"figure_path": "2408.09954v1_figure_12.png",
|
| 219 |
+
"caption": "Figure 12: The subplot at the top presents the battery theoretical lifetime for 10 bytes LR-FHSS transmission as a function of transmit power and notification time. The lower subplot implies a constant 14 dBm transmit power.",
|
| 220 |
+
"url": "http://arxiv.org/html/2408.09954v1/x12.png"
|
| 221 |
+
}
|
| 222 |
+
},
|
| 223 |
+
"validation": true,
|
| 224 |
+
"references": [],
|
| 225 |
+
"url": "http://arxiv.org/html/2408.09954v1"
|
| 226 |
+
}
|
20240819/2408.09962v1.json
ADDED
|
@@ -0,0 +1,301 @@
| 1 |
+
{
|
| 2 |
+
"title": "Validation of the Results of Cross-chain Smart Contract Based on Confirmation Method",
|
| 3 |
+
"abstract": "Smart contracts are widely utilized in cross-chain interactions, where their results are transmitted from one blockchain (the producer blockchain) to another (the consumer blockchain). Unfortunately, the consumer blockchain often accepts these results without executing the smart contracts for validation, posing potential security risks. To address this, we propose a method for validating cross-chain smart contract results. Our approach emphasizes consumer blockchain execution of cross-chain smart contracts of producer blockchain, allowing comparison of results with the transmitted ones to detect potential discrepancies and ensure data integrity during cross-chain data dissemination. Additionally, we introduce the confirmation with proof method, which involves incorporating the chain of blocks and relevant cross-chain smart contract data from the producer blockchain into the consumer blockchain as evidence (or proof), establishing a unified and secure perspective of cross-chain smart contract results. Our verification results highlight the feasibility of cross-chain validation at the smart contract level.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Cross-chain technologies have become increasingly important in blockchain ecosystems, enabling seamless communication and data exchange across different blockchain networks [1 ###reference_b1###]. They have been widely used in various industries, including financial services [2 ###reference_b2###], supply chain management [3 ###reference_b3###], and healthcare [4 ###reference_b4###]. The cross-chain capabilities have helped improve information and value transfer, and have made digital transactions more transparent, secure, and efficient.\nCross-chain interaction refers to a fundamental concept in which the state or actions of one blockchain (referred as the producer blockchain in this paper) depend on the state or actions of another blockchain (referred to as the consumer blockchain in this paper). It is within this dynamic interaction that the potential for security challenges and risks arises. Ensuring the accuracy and trustworthiness of cross-chain data and the results of smart contracts is paramount, as any validation failure could lead to erroneous state changes in the consumer blockchain. Therefore, the validation of cross-chain data and smart contract results is of utmost importance in cross-chain scenarios. Figure 1 ###reference_### shows one example of cross-chain validation.\n###figure_1### Validating cross-chain smart contracts is a crucial prerequisite step in cross-chain interactions. Unlike validating simple cross-chain data, validating the results of smart contracts is far more complex. Smart contracts are intricate, self-executing agreements with predefined conditions and consequences, making it necessary to run the smart contract to verify its correctness.\nThere are two challenging issues during the cross-chain smart contract validation:\n(1) Cross-chain Validation method: Cross-chain validation is a critical process that typically involves executing smart contracts to ensure their integrity. While it\u2019s important for this process to execute smart contracts, it shouldn\u2019t disrupt the functioning of smart contracts within the consumer blockchain. Additionally, the execution of cross-chain smart contracts can consume resources within the consumer blockchain. In this context, the paper investigates cross-chain validation methods within separate running environments, taking into account resource utilization, to suit the requirements of various blockchain ecosystems.\n(2) State Unification: Blockchain forks, which occur when a blockchain splits into multiple potentially conflicting chains of blocks, present a significant challenge during the validation process. In such scenarios, ensuring a unified state of the producer blockchain within the consumer blockchain becomes imperative. This paper introduces an innovative approach to confirm the chain of block information originating from the producer blockchain into the consumer blockchain. This confirmation process helps to unify the consumer blockchain\u2019s perspective of the producer blockchain.\nThe contributions of this paper are summarized as follows:\n(1) Validation methods of cross-chain smart contract: This paper introduces a method for validating cross-chain smart contracts. Prior research has predominantly focused on validating blockchain data, with limited attention given to validating the results of smart contracts. 
However, the validation of cross-chain smart contract is a crucial aspect of ensuring the security and reliability of cross-chain interactions.\n(2) Resource-efficient validation methods: To address concerns related to resource allocation, the validation methods consider the resources occupation. First, we propose the metrics which measure the resources occupied in the cross-chain smart contract validation. Second, we propose validation methods that consider different resource utilization ratios. One of the proposed methods, referred to as collective validation, entails multiple nodes that trust each other utilizing a single node to perform cross-chain smart contracts. These nodes share the validation results among themselves, promoting a collaborative and resource-efficient validation process.\n(3) Cross-chain confirmation with proof: The method of confirmation with proof involves the integration of critical data from the producer blockchain into the consumer blockchain. This is accomplished by incorporating the chain of blocks and associated data from the producer blockchain as evidence (or proof) within the consumer blockchain. By securely embedding this information, the consumer blockchain gains a comprehensive and secure perspective of the cross-chain smart contract results.\nThe remaining sections of this paper are organized as follows. In Section II ###reference_###, we provide an overview of related work in the field of cross-chain.\nSection III ###reference_### describes the method of the validation of cross-chain smart contract.\nIn Section IV ###reference_###, we describe the method of cross-chain confirmation with proof for cross-chain smart contract.\nIn Section VI ###reference_###, we present the verification results and conduct corresponding analyses.\nFinally, Section VII ###reference_### concludes this paper with a summary of our contributions and potential future research directions."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "II Related Work",
|
| 15 |
+
"text": "In this section, we present a comprehensive review of existing approaches to cross-chain validation, encompassing both cross-chain data propagation, which involves the process of data validation, and various cross-chain data validation methods."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "II-A Cross-chain Data Propagation Methods",
|
| 21 |
+
"text": ""
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.1.1",
|
| 25 |
+
"parent_section_id": "2.1",
|
| 26 |
+
"section_name": "II-A1 User-Initiated Cross-chain Data Transfer",
|
| 27 |
+
"text": "User-initiated cross-chain data transfer represents a fundamental approach to cross-chain interactions. In this paradigm, users actively participate in transferring data from one blockchain to another, frequently employing techniques such as hash locking [5 ###reference_b5###] where users use a secret number (or its hash) set in one blockchain as data to unlock assets in another blockchain. This method empowers users with a high degree of control and flexibility over the data transfer process.\nIn this approach, users initiate and oversee the entire data transfer operation. Transaction validation primarily hinges on the user\u2019s discretion and is carried out manually. Users decide when and how to initiate data transfer transactions, ensuring that the data being moved from one blockchain to another aligns with their intended use cases.\nHowever, it\u2019s crucial to acknowledge that this method has its limitations. While it allows for direct user involvement and control, it simultaneously introduces the potential for human errors. Users must meticulously manage the data transfer process, including validating the data\u2019s integrity and ensuring it corresponds accurately to the destination blockchain\u2019s requirements. This manual approach becomes increasingly challenging and error-prone in scenarios involving intricate cross-chain interactions or frequent data transfers [6 ###reference_b6###].\nFor complex cross-chain applications requiring a high level of reliability and data accuracy, the user-initiated data transfer approach may be less suitable. As such, alternative methodologies, such as oracle services and direct cross-chain data propagation, have emerged to address these limitations and streamline cross-chain data transfer processes."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.1.2",
|
| 31 |
+
"parent_section_id": "2.1",
|
| 32 |
+
"section_name": "II-A2 Oracle Services",
|
| 33 |
+
"text": "Oracle services have rapidly gained prominence as indispensable tools for facilitating cross-chain data transfer [7 ###reference_b7###]. These services function as intermediary computing servers, serving the critical role of automating the retrieval of cross-chain data and its seamless propagation to other blockchain networks.\nThe central premise of oracle services revolves around automation and efficiency. By leveraging oracle solutions, blockchain ecosystems can significantly streamline the process of acquiring data from external sources and ensuring its availability on the target blockchain. This automation significantly reduces the manual intervention required from users, enhancing the overall efficiency of cross-chain interactions.\nOne of the hallmark features of oracle-based solutions is their autonomous transaction validation capabilities [8 ###reference_b8###]. In this model, transaction validation is predominantly entrusted to the oracle service itself. This approach further reduces user involvement, as the oracle service acts as a trusted intermediary responsible for validating the data\u2019s integrity and compliance with predefined criteria.\nAlthough oracle services offer numerous advantages in terms of automation and data reliability, they may introduce concerns related to centralization if not implemented with meticulous attention to decentralization mechanisms. Centralized oracle services could potentially become single points of failure or manipulation, undermining the decentralized ethos of blockchain technology.\nTo mitigate these centralization concerns, modern oracle solutions often incorporate decentralization features [9 ###reference_b9###], such as multiple oracle nodes, data aggregation, and cryptographic validation. These mechanisms aim to preserve the integrity and security of cross-chain data transfer while maintaining a level of automation that rivals user-initiated approaches.\nIn summary, oracle services have emerged as a pivotal enabler of cross-chain data transfer, offering automation, reliability, and reduced user intervention. Nevertheless, their successful deployment hinges on striking a delicate balance between automation and decentralization to preserve the core principles of blockchain technology."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "2.1.3",
|
| 37 |
+
"parent_section_id": "2.1",
|
| 38 |
+
"section_name": "II-A3 Direct Cross-Chain Data Propagation",
|
| 39 |
+
"text": "Direct cross-chain data propagation [10 ###reference_b10###] has been facilitated by specialized components such as blockchain routers [11 ###reference_b11###], ushering in a new era of seamless data exchange and interaction between disparate blockchain networks. These innovative components serve as the backbone for enabling nodes from distinct blockchains to communicate directly and share data efficiently.\nAt the heart of this approach lies the concept of decentralized autonomy. Rather than relying on centralized intermediaries or user-initiated actions, direct cross-chain data propagation empowers the participating nodes themselves. These nodes, representing distinct blockchains, take an active role in the verification and validation of incoming data [6 ###reference_b6###].\nThe transaction validation process in direct cross-chain data propagation is notably distributed across these participating nodes. Each node is tasked with verifying the data relevant to its respective blockchain, ensuring its consistency, accuracy, and adherence to predefined rules and protocols. This collaborative validation approach effectively combines the benefits of automation with the principles of decentralization.\nOne of the key strengths of this approach is its ability to strike a harmonious balance between automation and decentralization. By leveraging the collective computational power and trustworthiness of nodes across different blockchains, direct cross-chain data propagation offers a robust and reliable means of data transfer and validation. This makes it an ideal choice for a wide spectrum of cross-chain applications, from simple asset transfers to more complex, multi-chain smart contract executions.\nWhile direct cross-chain data propagation exhibits significant promise, it is not without its challenges. Coordination among diverse blockchain networks and ensuring seamless interoperability can be technically intricate. Furthermore, robust security mechanisms must be in place to safeguard data during transmission and validate it effectively. Nevertheless, as this approach matures and becomes more widely adopted, it holds the potential to revolutionize how cross-chain interactions are conducted, fostering a new era of blockchain connectivity and collaboration.\nIn summary, direct cross-chain data propagation, facilitated by components like blockchain routers, exemplifies the ideal marriage of automation and decentralization. It empowers nodes from distinct blockchains to work together in validating and propagating data, offering a versatile solution for cross-chain applications while maintaining the fundamental principles of blockchain technology."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "2.2",
|
| 43 |
+
"parent_section_id": "2",
|
| 44 |
+
"section_name": "II-B Cross-chain Data Validation",
|
| 45 |
+
"text": ""
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "2.2.1",
|
| 49 |
+
"parent_section_id": "2.2",
|
| 50 |
+
"section_name": "II-B1 Atomic Swaps",
|
| 51 |
+
"text": "Atomic swaps allow different blockchains to swap assets without any intermediary [12 ###reference_b12###]. These swaps are atomic, meaning that they either occur completely or not at all.\nThe advantage of atomic swaps is that they provide trustless asset exchange between different blockchains without the need for centralized exchanges or third parties. However, atomic swaps may require significant resources and time to set up and execute.\nData validation in atomic swaps occurs through the execution of pre-agreed rules and conditions agreed upon by both parties. These rules are typically encoded in smart contracts on both blockchains involved in the swap. The smart contracts ensure that the swap conditions are met before assets are released to the other party. Atomic swaps typically require both parties to cryptographically prove their holdings of assets to be swapped."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "2.2.2",
|
| 55 |
+
"parent_section_id": "2.2",
|
| 56 |
+
"section_name": "II-B2 Hash Time-Locked Contracts (HTLC)",
|
| 57 |
+
"text": "HTLCs allow two parties to exchange value across blockchains without trusting each other [13 ###reference_b13###]. HTLCs typically involve a hash commitment scheme where both parties commit to a random number or hash value before exchanging the assets. If the hashes match at a later time, the assets are released to the other party. HTLCs are commonly used in atomic swaps and provide trustless asset exchange between different blockchains. However, HTLCs may be vulnerable to double spending attacks if not implemented properly.\nData validation in HTLCs occurs through hash commitment schemes where both parties commit to random numbers or hashes before exchanging assets. The hashes serve as commitments to the transaction details, ensuring that both parties have agreed on the terms of the exchange. If both parties successfully match their hashes at a later time, the assets are released to the other party. HTLCs typically require both parties to cryptographically prove their holdings of assets to be swapped."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "2.2.3",
|
| 61 |
+
"parent_section_id": "2.2",
|
| 62 |
+
"section_name": "II-B3 Lightning Networks",
|
| 63 |
+
"text": "Lightning networks are layer-2 solutions for Bitcoin and other blockchains that allow for fast and low-cost transactions [14 ###reference_b14###] [15 ###reference_b15###]. They operate as payment channels between users on the blockchain network and can be used for cross-chain data validation. Lightning networks provide privacy and scalability advantages over traditional blockchain transactions and allow for faster and cheaper transactions. However, they may be vulnerable to routing attacks if not implemented properly.\nData validation in lightning networks occurs through multiple stages of transaction confirmation and validation between network nodes. Each transaction on the lightning network is typically confirmed multiple times by different nodes, forming a path of trustless routing nodes called \"channels\" that route transactions between users on the blockchain network. These channels maintain transactional records that are cryptographically encrypted and synchronized across participating nodes on the network, ensuring trustless operation and data validation."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "2.2.4",
|
| 67 |
+
"parent_section_id": "2.2",
|
| 68 |
+
"section_name": "II-B4 Polkadot",
|
| 69 |
+
"text": "Polkadot is a new blockchain protocol that connects different blockchains together using relays [16 ###reference_b16###]. It allows cross-chain data validation and transfer of assets between different blockchains. Polkadot\u2019s architecture includes a relay chain (or \"canister\") that anchors blockchains together, enabling interoperability between different chains. Relay chains validate and authenticate transactions between different chains, ensuring security and trustless operation. Polkadot also provides future compatibility for existing blockchain technologies such as Ethereum and Bitcoin, allowing them to interoperate with Polkadot\u2019s relay chain.\nData validation in Polkadot occurs through relay chains that anchor blockchains together and validate transactions between different chains [17 ###reference_b17###]. Relay chains maintain transactional records and cryptographically encrypted records of each transaction to ensure trustless operation across multiple chains. Additionally, Polkadot\u2019s architecture includes validation nodes called \"canister nodes\" that validate transactions and cryptographically seal them into the blockchain, ensuring data integrity and validation on the relay chain level."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "2.2.5",
|
| 73 |
+
"parent_section_id": "2.2",
|
| 74 |
+
"section_name": "II-B5 Cosmos",
|
| 75 |
+
"text": "Cosmos is also a blockchain network that connects different blockchains together using hubs and zones [18 ###reference_b18###]. It provides cross-chain data validation and atomic swaps between different blockchains. Cosmos\u2019 architecture includes hubs (or \"hub chains\"), which serve as central coordinators for multiple zones (or \"zone chains\") connected to them [19 ###reference_b19###]. Hub chains validate transactions and coordinate zone chains to ensure security and trustless operation across multiple chains. Atomic swaps can be performed between different zones connected to the same hub or between zones connected to different hubs within the Cosmos network.\nData validation in Cosmos occurs through hub chains that serve as central coordinators for multiple zones connected to them. Hub chains validate transactions and coordinate zone chains to ensure trustless operation across multiple chains connected to them. Atomic swaps can be performed between different zones connected to the same hub or between zones connected to different hubs within the Cosmos network, with data validation occurring through pre-agreed rules encoded in smart contracts on hub chains and zone chains involved in"
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "2.3",
|
| 79 |
+
"parent_section_id": "2",
|
| 80 |
+
"section_name": "II-C Gap in Existing Approaches",
|
| 81 |
+
"text": "While the aforementioned approaches effectively address various aspects of cross-chain data validation, they often lack emphasis on the validation of smart contract. As a consequence, potential issues pertaining to the accuracy and reliability of cross-chain smart contract outcomes remain unaddressed. In this paper, we direct our focus towards this critical aspect, proposing methods to enhance the security and dependability of smart contract executions in cross-chain scenarios."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "3",
|
| 85 |
+
"parent_section_id": null,
|
| 86 |
+
"section_name": "III Cross-chain Validation of Results of Smart Contract",
|
| 87 |
+
"text": "Cross-chain validation involves the verification of data or smart contracts from other blockchains, which can be categorized into two corresponding aspects: data validation and smart contract validation.\nData validation entails the verification of cross-chain data originating from the producer blockchain, encompassing cross-chain-related transactions and blocks. This process confirms the occurrence of specific states within the producer blockchain.\nThe characteristics of cross-chain data validation is that there is no requirement of the execution of a smart contract. For example, a transaction in the chain of blocks of the producer blockchain, it can be verified by whether it is sent by the corresponding sender and whether the sender has enough balance or not. Contrarily, the validation of a smart contract includes the step to run the smart contract.\nSmart contract validation is the process of ensuring that the results of a smart contract\u2019s execution are accurate and valid. The aim is not to run the smart contract, but to check whether the corresponding results are correct or not. In the context of cross-chain interactions, it is crucial to verify the results of cross-chain smart contracts as the results will cause corresponding actions in consumer blockchain.\nThere are two different ways for the validation of smart contract, smart contract level validation (SCL validation) and block level validation (BL validation).\nIn the smart contract level validation, it is to run the smart contract and get the corresponding results to compare whether it is the same as the results in the block.\nIn the block level validation, it is just to check whether the block is correct (such as whether it is in the longest chain of blocks) and trust the block without running the smart contract. A block in the longest chain is deemed valid, and its associated data, including transactions and smart contract results, is considered valid as well. However, this straightforward validation method is not always applicable in cross-chain scenarios, as there is no equivalent mechanism in place during cross-chain propagation.\nThe reason to use the SCL validation is for the following three reasons. (1) In certain situations, results are calculated based on multiple states recorded in the blockchain, rather than through direct access. If the smart contract from the producer blockchain is not executed, a similar logic must be recreated on the consumer blockchain to derive the results from the related states. (2) In some blockchains, only the hash of results of smart contract are written to the blockchain, the consumer blockchain cannot use the hash for cross-chain interact directly. (3) When a miner seals in wrong results in the longest blockchain, it will take time for other nodes to correct the results, thus if the corresponding smart contract is not run directly, it will take more time to find the wrong results.\nHowever, there is currently a lack of research focused on the validation of the results of cross-chain smart contracts. This type of validation is crucial since the majority of existing blockchains employ smart contracts for implementing digital contracts, including those designed for cross-chain scenarios, and the results of these smart contracts hold importance in cross-chain interactions."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "3.1",
|
| 91 |
+
"parent_section_id": "3",
|
| 92 |
+
"section_name": "III-A Validation in Smart Contract Level",
|
| 93 |
+
"text": "The smart contract level validation is that the nodes in the producer blockchain run the smart contract of the producer blockchain to verify the results to avoid errors or cheating during cross-chain propagation.\nHowever, the resources required by smart contracts of consumer and producer blockchains may be conflict, they should run in isolated environments. Meanwhile, there are interacts between them, there should be cross environments methods to let nodes of consumer blockchain get the results of execution."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "3.1.1",
|
| 97 |
+
"parent_section_id": "3.1",
|
| 98 |
+
"section_name": "III-A1 Running Cross-chain Smart Contract in Separate Environment",
|
| 99 |
+
"text": "Running in a separate environment implies that cross-chain smart contracts from the producer blockchain are executed in an isolated setting. This distinct environment may leverage various technologies, including containers, virtual machines, or isolated runtimes. It\u2019s essential to clarify that the term \"cross-chain smart contract\" in this context specifically refers to smart contracts from producer blockchains, distinguishing them from smart contracts that are native to the local blockchain but used for cross-chain interactions.\nThe process of instantiating and running the smart contract involves the actual execution of the contract\u2019s logic, using the provided byte code and data to bring it to life within the blockchain environment.\n(1) Cross-chain Instantiation: The consumer blockchain synchronizes the transaction that carries the instantiation parameters (or deployment transactions), including the binary code of the smart contract, instantiation parameters, and related data. Cross-chain instantiation encompasses the creation of an instance of the cross-chain smart contract within a dedicated runtime environment on the consumer blockchain. During instantiation, the contract\u2019s byte code and any accompanying data are utilized to establish the initial state of the contract.\n(2) Invoking the Contract for Validation: After the smart contract is instantiated, it is activated by executing its methods or functions using the corresponding parameters. These parameters are conveyed via transactions, and as part of the cross-chain validation process, the corresponding invocation transactions must also be synchronized from the producer blockchain. It\u2019s important to note that this invocation is primarily for validation purposes, and all transactions have been previously recorded in the producer blockchain rather than being initiated interactively by the user.\n(3) Result Generation and Validation: As the execution progresses, the smart contract generates results that represent the outcomes of the contract\u2019s operations. Different from the producer blockchain, these results are used to verify the results of the producer blockchain instead of the state changes, transaction outputs, or data updates. The consumer blockchain will use the cross environment method provided by the environment of the producer blockchain to get the results, for example by using the HTTP query method."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "3.2",
|
| 103 |
+
"parent_section_id": "3",
|
| 104 |
+
"section_name": "III-B Resource Optimization for Cross-Chain Smart Contracts",
|
| 105 |
+
"text": ""
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "3.2.1",
|
| 109 |
+
"parent_section_id": "3.2",
|
| 110 |
+
"section_name": "III-B1 Collective Validation of Producer Blockchain",
|
| 111 |
+
"text": "In the proposed model, it\u2019s worth highlighting that not every node in the consumer blockchain network needs to maintain a dedicated environment for running validation smart contracts within the producer blockchain.\nMaintaining separate producer environments for each consumer node can result in significant resource redundancy, including duplicated storage space, computational power, and memory.\nInstead, several nodes can efficiently share a single producer environment. This approach is especially advantageous for nodes that share similar interests or benefits, such as some nodes within the same research laboratory or company, all managed by a single entity or person.\nThis optimization offers substantial resource savings, reducing the overall computational and storage resources required to execute and verify smart contracts in the consumer environment. The corresponding validation method is called collective validation.\nTo illustrate the resource efficiency gained by collective validation, we can formulate the following equation:\nLet:\n- be the total number of consumer nodes in the network.\n- represent the resources required for an individual producer environment.\n- denote the resources needed for a shared producer environment.\n- be the total resource savings achieved through sharing environments.\nThe resource savings () can be calculated as in (1 ###reference_###).\nThis equation demonstrates that the total resource savings () scale linearly with the number of consumer nodes (). By adopting a shared environment strategy, these savings can be significant, reducing the overall resource overhead in the network.\nThe collective validation approach optimizes the utilization of computational, storage, and memory resources within a cross-chain interaction framework. It promotes resource efficiency, cost savings, scalability, and network resilience while ensuring that the validation of smart contracts remains robust and secure."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "3.2.2",
|
| 115 |
+
"parent_section_id": "3.2",
|
| 116 |
+
"section_name": "III-B2 Utilizing Cross-Chain Embedded Smart Contracts for Resource Optimization",
|
| 117 |
+
"text": "In the smart contract validation, there are two main aspects we should consider for the consumer blockchain.\n(1) Calculations resources occupation. From the above process, we can see that a smart contract is loaded into the memory, instantiation, and running on the nodes of the consumer blockchain. This process occupies corresponding resources, including as CPUs and memories. Together with the smart contract on the consumer blockchain, it will burden the nodes of consumer blockchain.\n(2) Synchronization data burden. Meanwhile, there are transactions used to trigger the interface of a smart contracts, which carries the parameters to invoke the interface, and we call these transactions the invocation transactions. While these transactions may locate in different blocks, which is then required for the validation of the execution results of smart contracts. The more invocation transactions required, the more information that requires to be synchronization during the cross-chain validation.\nConsidering the above two burdens, we give to measurements for the cross-chain smart contract validation, occupation time, related transactions, which can described as a two-tuples , as shown in (2 ###reference_###).\n, where is the resource occupation time and is the transactions that are requires to verify the results of the smart contract.\nWe now discuss resources occupation time, to measure the time that a smart contract occupies resources of blockchain nodes. is shown in (3 ###reference_###).\n, where and are the time that a smart contract is instantiated and the time that the smart contract is terminated separately.\nIf a smart contract can have less , it can reduce the resources\u2019 occupation of blockchain nodes. The following section will discuss one kind of this smart contract.\nMeanwhile, research indicates that a significant proportion of smart contracts, around 75%, remain uninvoked, with 80% only being executed once. One approach to mitigate the resource consumption of these smart contracts is through the use of embedded smart contracts. Embedded smart contracts are introduced in the work by Suh et al. [20 ###reference_b20###], a method that integrates the smart contract within its first invocation transaction. In this section, we evaluate the advantages and potential resource savings associated with the use of embedded smart contracts for cross-chain interactions.\nEmbedded smart contracts are instantiated exclusively during invocation, providing an effective means to conserve resources. This conservation applies to both computational and transaction-related resources, which are vital considerations in cross-chain interactions.\n(1). Resource Efficiency: One key benefit of embedded smart contracts is the efficient utilization of computational resources. As illustrated in equation (4 ###reference_###), which outlines the time relationship, the embedded smart contract\u2019s code is initiated later than traditional deployments (). Consequently, it imposes a reduced burden on the computational resources of blockchain nodes.\n(2). Transaction Optimization: The use of embedded smart contracts streamlines the validation process by reducing the number of cross-chain transactions required. Traditional deployment necessitates separate transactions for contract deployment and its initial invocation. 
With embedded smart contracts, these two steps are merged into a single transaction, effectively reducing the number of transactions required for validation.\nMoreover, there exists a unique class of smart contracts known as \"disposable smart contracts.\" These contracts possess a distinctive trait: they are designed to be invoked only once and are immediately terminated, precluding any subsequent invocations. This characteristic sets them apart from conventional long-lived smart contracts.\nThis unique usage pattern allows for a resource-saving strategy. When a smart contract can be identified as disposable, it significantly reduces the resources expended in cross-chain interactions, particularly with regard to both time and transactions.\nDisposable smart contracts require minimal resource allocation due to their ephemeral nature. Unlike long-lived contracts, which maintain a presence on the blockchain, disposable smart contracts have a concise lifecycle. As seen in Equation (5 ###reference_###), the smart contract\u2019s initiation (begin) closely aligns with its invocation and termination times (). This near-simultaneity results in the efficient use of resources."
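A small Python sketch of the (T_o, TX_v) measurement and the disposable-contract property, following Eqs. (2)-(5) as reconstructed above; the field names and example values are assumptions.

```python
from dataclasses import dataclass

@dataclass
class ValidationCost:
    """Two-tuple (T_o, TX_v) from Eq. (2): occupation time plus the
    transactions that must be synchronized to verify the contract's results."""
    t_begin: float       # time at which the contract is instantiated
    t_end: float         # time at which the contract is terminated
    required_txs: list   # deployment/invocation/termination txs needed for validation

    @property
    def occupation_time(self) -> float:
        return self.t_end - self.t_begin   # T_o = t_end - t_begin (Eq. (3))

# A disposable contract is instantiated, invoked once, and terminated together,
# so t_begin ~= t_end and a single embedded transaction suffices (Eqs. (4)-(5)).
disposable = ValidationCost(t_begin=100.0, t_end=100.2, required_txs=["embedded_tx"])
print(disposable.occupation_time, len(disposable.required_txs))
```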
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "4",
|
| 121 |
+
"parent_section_id": null,
|
| 122 |
+
"section_name": "IV Cross-chain Confirmation with Proof for Results of Cross-chain Smart Contract",
|
| 123 |
+
"text": "Following the validation of cross-chain smart contract results, there is a need to verify and put these results and their associated information into the consumer blockchain. It\u2019s important to note that cross-chain smart contract results are not inherently present in the consumer blockchain\u2019s blockchains. Cross-chain confirmation with proof for cross-chain smart contract has two characteristics: (a) to confirm the results of cross-chain smart contracts within the consumer blockchain (confirmation), and (b) to provide evidence that these results have also been verified and accepted within the producer blockchain (proof).\nThe requirement for confirmation arises to accomplish two primary objectives:\n(1) Mitigating Deception: Confirmation entails providing substantial proof that the relevant events transpired on the producer blockchain. By confirming this information, consumer blockchain nodes enhance security and guard against deceptive actions on the producer blockchain. In scenarios where a consumer blockchain attempts to manipulate a block and later alleges that it has undergone re-branching, the validation process ensures the veracity of such claims.\n(2) Ensuring a Unified View of the Producer Blockchain: Confirmation guarantees that the nodes of the consumer blockchain maintain a coherent and unified perspective of the producer blockchain. Both producer and consumer blockchains can experience re-branching events, leading to two significant benefits:\n(2a) When a specific block in the producer blockchain disappears, without corresponding confirmation, the nodes responsible for mining the block that contained the vanishing cross-chain information may be unjustly accused of wrongdoing.\n(2b) In cases where a block containing a specific cross-chain event from the consumer blockchain disappears, the importance of cross-chain confirmation and proof mechanisms becomes evident. When such a block vanishes, it raises the need for a unified view within the consumer blockchain regarding whether the particular event has indeed occurred on the producer blockchain. Confirmation with proof processes are vital to maintain this unified perspective and ensure the security and integrity of cross-chain interactions.\nNow, we provide the definition of cross-chain confirmation with proof, which entails confirming essential information from the producer blockchain to the consumer blockchain. This confirmation includes supporting evidence that specific events have occurred on the producer blockchain, ultimately guaranteeing the precision of cross-chain interactions.\nCross-chain confirmation was initially introduced in the context of conditional transactions, as discussed in [21 ###reference_b21###]. However, this method primarily focuses on confirming individual transactions and does not encompass comprehensive block confirmation with supporting proof."
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "4.1",
|
| 127 |
+
"parent_section_id": "4",
|
| 128 |
+
"section_name": "IV-A Prerequisites for Cross-chain Confirmation",
|
| 129 |
+
"text": "Several prerequisites are essential for enabling cross-chain confirmation."
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"section_id": "4.1.1",
|
| 133 |
+
"parent_section_id": "4.1",
|
| 134 |
+
"section_name": "IV-A1 Longest Chain Rule",
|
| 135 |
+
"text": "The primary prerequisite is the adoption of the longest blockchain rule [21 ###reference_b21###] [22 ###reference_b22###]. This rule dictates that, when confronted with two conflicting chains of blocks, the longest chain is recognized as the main chain. In other words, the chain where the data is widely accepted. Since the longest chain typically boasts the highest number of mining nodes and is the most challenging to disrupt, it garners widespread acceptance and is regarded as the only valid blockchain."
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"section_id": "4.1.2",
|
| 139 |
+
"parent_section_id": "4.1",
|
| 140 |
+
"section_name": "IV-A2 Difficulty in Forging Blocks",
|
| 141 |
+
"text": "It is crucial to make it challenging to forge a block, imposing certain constraints that deter illicit actions by hackers . In Proof of Work (PoW), miners must continuously compute to discover a nonce that results in the block\u2019s hash meeting specific criteria. In Proof of Stake (PoS), an account\u2019s likelihood of mining a block is proportional to the assets it holds. This makes it challenging for hackers to forge the signatures of target accounts."
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"section_id": "4.1.3",
|
| 145 |
+
"parent_section_id": "4.1",
|
| 146 |
+
"section_name": "IV-A3 Confirmation Completion Judgment",
|
| 147 |
+
"text": "To declare that information is confirmed, there must be a sufficient number of successive blocks verifying the previous ones. The addition of a new block to an older one makes it increasingly difficult to falsify the older block, as demonstrated in [22 ###reference_b22###]. When multiple blocks follow a specific block, it becomes proportionately more challenging to falsify all subsequent blocks."
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"section_id": "4.2",
|
| 151 |
+
"parent_section_id": "4",
|
| 152 |
+
"section_name": "IV-B Requirements for Cross-chain Confirmation",
|
| 153 |
+
"text": "In order to confirm the results of cross-chain smart contracts, the following are two requirements for cross-chain confirmation.\n(1) The validation process requires the entire relationship of chain of blocks from the producer blockchain. This approach ensures an equivalent level of security as the original blockchain (the producer blockchain).\nThis requirement is crucial to prevent the mining nodes of the consumer blockchain from falsifying data related to the producer blockchain. The chain of blocks constitutes successive evidence chain for every event within the producer blockchain. Any omission of blocks would render it impossible to establish the integrity of subsequent blocks, starting from the genesis block.\nTo illustrate this, let\u2019s assume there are blocks in the producer blockchain. If we decide to skip the synchronization of a block, indexed as (where ), the synchronized chain\u2019s level of difficulty is equivalent to a chain comprising or blocks, rather than the complete blocks. Consequently, while the blocks from 1 (the genesis block) to are synchronized, they do not contribute to the verification of the blocks from to .\n(2) The information necessary for cross-chain confirmation should be minimized, focusing exclusively on synchronizing the data required for the relationships among blocks and validating the results of cross-chain smart contracts.\nRegarding the chain of blocks, complete synchronization of all block details is unnecessary. Only the block headers need to be synchronized, as they contain sufficient information to verify the block\u2019s authenticity and difficulty. Therefore, in this paper, we specifically refer to block headers rather than the entire block when discussing the chain of blocks.\nFor transactions, synchronization is restricted to those transactions essential for cross-chain validation. This includes deployment transactions, invocation transactions, termination transactions, and similar transactions related to cross-chain smart contracts.\nWhen synchronizing a cross-chain transaction, the associated Merkel tree, as described in [22 ###reference_b22###] (or other relevant sources), must also be included to establish the transaction\u2019s validity."
|
| 154 |
+
},
|
| 155 |
+
{
|
| 156 |
+
"section_id": "4.3",
|
| 157 |
+
"parent_section_id": "4",
|
| 158 |
+
"section_name": "IV-C Cross-Chain Confirmation with Proof",
|
| 159 |
+
"text": "Cross-chain confirmation with proof is to mine data from the producer blockchain into the consumer blockchain to the consumer blockchain. This process comprises two main phases: the data synchronization process and the data mining process.\nThe data synchronization process, as discussed in [10 ###reference_b10###], entails synchronizing data from the producer blockchain to the consumer blockchain. We won\u2019t delve into further details in this paper, as it has been addressed in [10 ###reference_b10###].\nFollowing data synchronization, miners must incorporate this data into the consumer blockchain. We refer to this step as the cross-chain mining process. Essentially, this process involves mining the data, including the necessary blocks and transactions, from the producer blockchain into the consumer blockchain.\nBlock headers are periodically synchronized and mined into the consumer blockchain. It\u2019s crucial that newly confirmed blocks from the producer blockchain form a continuous chain starting from the last confirmed chain of blocks on the producer blockchain. This means that if the last confirmed block was , the next block should be , and it must have a hash link in the blockchain header to the previous block, .\nHowever, since the primary objective is to validate the producer data, there\u2019s no necessity to mine data from the producer blockchain into the consumer blockchain on a block-by-block basis. A more efficient approach is to mine data at regular intervals, for example, every blocks (where is a specific number), or when a block contains cross-chain data. A collection of blocks from the producer blockchain that are mined at once in the consumer blockchain is referred to as a cross-chain block segment, and represents the length of a cross-chain block segment. This strategy helps mitigate the challenges associated with frequent re-branching in the producer blockchain.\nLet\u2019s illustrate this with a brief example. Imagine there are blocks of producer blockchain, denoted as , , \u2026, , which have not yet been mined in the consumer blockchain. When later these blocks are mined, consider the first block, . It now has consecutive blocks following it, which significantly raises the difficulty for any attempt to re-branch.\nFor instance, if we set to 3, the first block () will have 2 successive blocks (, ) linked to it, making it far more resistant to re-branching than a block with no successive blocks.\nWhile rebranching is only one of the impact factors, selecting an appropriate value for should adhere to the following criteria.\n(R1)The total size of the data from the producer blockchain, represented by pieces, should be smaller than the maximum block size of the consumer blockchain to ensure it can fit within a single block. This can be expressed as:\n(R2) If condition (R1) can be met, then should be a number that is either close to or greater than the number of successive blocks required to confirm a block. This is because the first blocks in this block segment have the enough low probability of being rebranched. This can be framed as:\n, where represents the average probability of a block being fake.\n(R3) On the other hand, should not be excessively large, as indicated in (8 ###reference_###). When is too large, there\u2019s a risk that other nodes may have ample time to fabricate a cheating block. 
This is particularly relevant as we allow the mining of one block if it contains cross-chain smart contracts.\n, where denotes the average time for a block to be produced in the producer blockchain."
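A sketch of screening candidate segment lengths m against (R1)-(R3) as reconstructed above; all parameter names and numbers are illustrative assumptions.

```python
def segment_length_ok(m: int, hdr_size: int, max_block_size: int,
                      confirm_depth: int, t_block: float, t_max: float) -> bool:
    """Check a candidate cross-chain block-segment length m against the
    three criteria (R1)-(R3) as reconstructed above."""
    r1 = m * hdr_size <= max_block_size   # (R1) segment fits in one consumer block
    r2 = m >= confirm_depth               # (R2) first blocks already deeply confirmed
    r3 = m * t_block <= t_max             # (R3) interval short enough to resist forging
    return r1 and r2 and r3

# e.g. 80-byte headers, 1 MB blocks, 6-block confirmation depth, 10 s block
# time, and at most 10 minutes between confirmations (illustrative numbers):
print([m for m in range(1, 64) if segment_length_ok(m, 80, 1_000_000, 6, 10.0, 600.0)])
```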
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"section_id": "4.4",
|
| 163 |
+
"parent_section_id": "4",
|
| 164 |
+
"section_name": "IV-D Block Data Structure for Cross-chain Confirmation",
|
| 165 |
+
"text": "To facilitate the cross-chain confirmation process, the block data structure of the consumer blockchain will be adjusted to include data used for validation from the producer blockchain: the chain of blocks of the producer blockchain and the necessary data to validate the results of cross-chain smart contracts. A depiction of the block structure is shown in Figure 2 ###reference_###.\n###figure_2### To support the confirmation with proof, the Merkle tree is specially designed, with the root having two subtrees. One subtree is for internal data (such as the hash of the consumer blockchain), while the other is for the producer blockchain. This design prevents the chain of blocks from reverting to the producer blockchain, which could cause confusion."
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"section_id": "5",
|
| 169 |
+
"parent_section_id": null,
|
| 170 |
+
"section_name": "Security Analysis",
|
| 171 |
+
"text": "In the proposed model, a fundamental security enhancement is achieved through the validation of cross-chain smart contract results and confirmation with proof. We attempt to analyze the corresponding impact from the corresponding aspects."
|
| 172 |
+
},
|
| 173 |
+
{
|
| 174 |
+
"section_id": "5.1",
|
| 175 |
+
"parent_section_id": "5",
|
| 176 |
+
"section_name": "Validation of Cross-Chain Smart Contract Results",
|
| 177 |
+
"text": ""
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"section_id": "5.1.1",
|
| 181 |
+
"parent_section_id": "5.1",
|
| 182 |
+
"section_name": "V-A1 Mitigating Cheating",
|
| 183 |
+
"text": "By independently verifying the results of cross-chain smart contracts, the consumer blockchain can ensure that it is not reliant on potentially untrustworthy or erroneous data from the producer blockchain. This is crucial for maintaining the integrity of cross-chain interactions. If the results were not fully verified, dishonest nodes on either the producer or consumer blockchain could potentially manipulate or cheat the system, leading to incorrect outcomes and security vulnerabilities.\nOne crucial motivation for cross-chain smart contract validation is the absence of an assurance during cross-chain synchronization, despite the presence of validation processes within each blockchain. Consequently, with cross-chain smart contract validation, any discrepancies can be detected, and the validation process can prevent incorrect or tampered data from being incorporated into the consumer blockchain.\nWe can illustrate this briefly. Let represent data from the producer blockchain. represents the consensus algorithm of the producer blockchain. represents the synchronization process, which transfers data from the producer to the consumer blockchain.\nThe validation process ensures the trustworthiness of data from the producer blockchain, as shown in Equation (9 ###reference_###).\nHere, ensures that the data received via synchronization () matches the consensus algorithm on the producer blockchain ().\nThe process illustrated by Equation (9 ###reference_###) reinforces the critical role of validation in enhancing security and mitigating cheating risks in cross-chain interactions."
|
| 184 |
+
},
|
| 185 |
+
{
|
| 186 |
+
"section_id": "5.1.2",
|
| 187 |
+
"parent_section_id": "5.1",
|
| 188 |
+
"section_name": "V-A2 Data Integrity",
|
| 189 |
+
"text": "Verifying the results of cross-chain smart contracts ensures data integrity within the consumer blockchain. When cross-chain smart contract results are verified, it provides an additional layer of assurance that the data stored in the consumer blockchain is accurate and trustworthy. This validation mechanism acts as a safeguard against potential data tampering or inaccuracies that could occur during the cross-chain data transfer process. By verifying results, the consumer blockchain can maintain a high level of data integrity, enhancing the overall reliability and security of the blockchain network. This is especially important for applications where the accuracy of data is critical, such as financial transactions or supply chain management. Any compromise in data integrity can lead to financial losses or other adverse consequences."
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"section_id": "5.2",
|
| 193 |
+
"parent_section_id": "5",
|
| 194 |
+
"section_name": "Maintaining or Enhancing the Security of Producer Blockchain Data in the Consumer Blockchain",
|
| 195 |
+
"text": "The model aims to ensure that the security of data from the producer blockchain is not compromised in the consumer blockchain.\nTo confirm data from the producer blockchain in the consumer blockchain, nodes on the consumer blockchain must provide successive blocks that match the corresponding difficulties. Attempting to introduce fraudulent data would require significant computational resources and would be economically unfeasible, as illustrated in Equation (10 ###reference_###). This is due to the mining process, which involves solving cryptographic puzzles that requires substantial computational resources.\n, where means the cost to make a fraud data of the producer blockchain, and means the corresponding benefits.\nIn summary, the model\u2019s approach to fully verifying cross-chain smart contract results and maintaining or enhancing the security of producer blockchain data in the consumer blockchain significantly bolsters the security and trustworthiness of cross-chain interactions. It mitigates cheating, error handling issues, and ensures data integrity, making it suitable for a wide range of applications where data accuracy and security are paramount."
|
| 196 |
+
},
|
| 197 |
+
{
|
| 198 |
+
"section_id": "6",
|
| 199 |
+
"parent_section_id": null,
|
| 200 |
+
"section_name": "VI Verification",
|
| 201 |
+
"text": "In this section, we aim to verify the feasibility of the proposed solution on two aspects. First, we assess the resources utilized in the validation of cross-chain smart contracts and the implemented methodologies to conserve resources. This verification aims to demonstrate the efficiency of resource usage and the viability of cross-chain smart contract validation. Second, we evaluate the impact of cross-chain segment block lengths on cross-chain smart contract validation, as it is a crucial factor affecting the confirmation process."
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"section_id": "6.1",
|
| 205 |
+
"parent_section_id": "6",
|
| 206 |
+
"section_name": "VI-A Resource Consumption of Cross-chain Smart Contract Validation",
|
| 207 |
+
"text": "The resource consumption of cross-chain smart contract validation was assessed in this section to evaluate its feasibility from the perspective of resource utilization. The verification focused on CPU and memory usage.\nWe conducted a comparison of resource utilization between two blockchains, denoted as blockchain1 and blockchain2. Both blockchains utilize the Proof of Work (PoW) consensus algorithm and synchronize their transactions and blocks. To assess resource consumption, we configured nodes within blockchain1 for distinct purposes. One node was dedicated solely to cross-chain smart contract validation without engaging in mining operations, labeled as CCValidationWithoutMining. Another node was exclusively assigned to mining activities without participating in cross-chain smart contract validation, identified as MiningWithoutCCValidation. The remaining nodes were tasked with both mining and cross-chain validation, classified as MiningAndCCValidation.\nThe cross-chain smart contract is designed to perform the following operations, aiming to simulate tasks that take a certain amount of time.\n(1) Generate a random number between 100 and 100 * 100 * 100, denoted as .\n(2) Loop from 1 to to generate random numbers and accumulate their sum.\n(3) Output the total sum after the loop is completed.\nA periodic task triggers transactions to call the smart contract every 3 seconds, aiming to generate multiple trigger transactions within one mining period, which typically takes around 10 seconds. Another task utilizes the \u2019top\u2019 command in Linux to record CPU and memory utilization every 5 seconds, capturing resource usage during mining.\nWe collected over 200 data points on resource consumption during the verification process. However, for clarity, we present the results for the first 50 rounds in Figures 3 ###reference_###, 4 ###reference_###, 5 ###reference_###, and 6 ###reference_###.\n###figure_3### ###figure_4### ###figure_5### ###figure_6### The analysis of resource utilization depicted in the figures indicates that the requirements for nodes are within reasonable bounds. Specifically, the average CPU utilization is 104.91%, 1.26%, and 97.17% for \"MiningAndCCValidation,\" \"CCValidation,\" and \"MiningWithoutCCValidation,\" respectively. Cross-chain smart contract validation contributes only a 1.2% additional CPU load, which is manageable for blockchain nodes.\nRegarding memory usage, the average memory utilization is 4.86%, 0.72%, and 5.59% for \"MiningAndCCValidation,\" \"CCValidation,\" and \"MiningWithoutCCValidation,\" respectively. Nodes exclusively performing cross-chain smart contract validation utilize less than 1% of memory, while nodes involved in both mining and cross-chain validation use less than 5% of memory, even less than nodes exclusively engaged in mining. This discrepancy may be attributed to random factors, as both threads occupy minimal memory (less than 6%).\nConsequently, considering the CPU and memory usage of cross-chain validation, performing cross-chain smart contract validation for blockchain nodes appears to be feasible from a resource perspective."
|
| 208 |
+
},
|
| 209 |
+
{
|
| 210 |
+
"section_id": "6.1.1",
|
| 211 |
+
"parent_section_id": "6.1",
|
| 212 |
+
"section_name": "VI-A1 Resource Optimization by Collective Validation",
|
| 213 |
+
"text": "In this verification scenario, we intend to demonstrate the storage savings achieved through collective validation of smart contracts. Within a local network, one node is designated as the storage node, while other nodes retrieve data for validation from the storage node, eliminating the need to store blockchain data locally.\nThree selected nodes, referred to as node 1, node 2, and node 3, are utilized for this purpose. Node 3 is responsible for storing cross-chain validation data, while nodes 1 and 2 request pertinent information from node 3 for cross-chain smart contract validation.\nTransactions are sent by a script at one-second intervals in the producer blockchain. Every 30 seconds, the size of the working folder in the consumer blockchain, which contains the blockchain code, logs, and data (local blockchain data and cross-chain validation data), is recorded. The first 30 rounds of sampling are then displayed in Figure 7 ###reference_###.\n###figure_7### Figure 7 ###reference_### depicts the results, showing that the node responsible for storing the cross-chain validation data (node 3) has the largest storage footprint. The average data usage for nodes engaged in blockchain data storage is 8.16MB, 8.08MB, and 8.33MB, respectively, with storage savings exceeding 7% for one node. As more nodes participate, the storage savings increase, with an average space-saving ratio of . When there are nodes, this approach can save storage space. Nevertheless, utilizing more nodes to share the same data introduces heightened risks, rendering this approach suitable for small groups. In cases where numerous nodes depend on a single storage node, the failure of the storage node can impact all connected nodes."
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"section_id": "6.2",
|
| 217 |
+
"parent_section_id": "6",
|
| 218 |
+
"section_name": "VI-B Impact of Cross-chain Block Segment Length on Cross-chain Smart Contract Validation",
|
| 219 |
+
"text": "In this section, we examine the influence of the cross-chain block segment length on cross-chain validation, with a focus on two critical aspects: the likelihood of falsifying cross-chain blocks and the issue of competitive rebranching."
|
| 220 |
+
},
|
| 221 |
+
{
|
| 222 |
+
"section_id": "6.2.1",
|
| 223 |
+
"parent_section_id": "6.2",
|
| 224 |
+
"section_name": "VI-B1 Probability of Falsifying Cross-Chain Blocks",
|
| 225 |
+
"text": "The primary objective is to assess the probability of an adversary node fabricating a block to deceive the system before the synchronization of the next block segment from the producer blockchain. As each segment usually contains a specific number of cross-chain blocks, it takes more than the regular block time to mine a single block. Consequently, there exists a probability for adversary nodes to mine a block with fraudulent data on the consumer blockchain before legitimate blocks are mined.\nFor our experiments, we use separate processes to represent different blockchain nodes, thereby obviating the need for too many separate hardware devices. These nodes are divided into two groups: Group A represents the producer blockchain with nodes (as described later), and Group B consists of a single adversary node attempting to mine a fraudulent block in the consumer blockchain. The success or failure of the adversary node\u2019s attempt depends on whether it can mine one block before a segment of blocks is mined on the producer blockchain.\nWe aimed to investigate different numbers () of nodes on the producer blockchain, including scenarios with up to 2048 nodes, which was challenging to replicate in our lab environment. As the mining time for a single block followed an exponential distribution [23 ###reference_b23###], we were able to simulate this mining time. The average mining time () for one block was set to 10 seconds, representing the laboratory\u2019s average blockchain mining duration. This parameter was utilized for generating mining times using Python\u2019s random.expovariate(1 / ) function.\nOur experimental setup utilized a Lenovo workstation (ThinkStation P350) equipped with 16 CPUs, an 11th Gen Intel(R) Core(TM) i9-11900K processor running at 3.50GHz, and 32 GB of RAM. The operating system used was Ubuntu 22.04.1 LTS.\nTo assess the impact of varying cross-chain block segment lengths (), we considered lengths of 2, 3, 4, 5, 6, 7, and 8. Furthermore, we explored the influence of different producer node counts () in Group A, ranging from 2 to 2048. We conducted 100,000 simulations for each scenario, and the results are shown in Figure 8 ###reference_###.\n###figure_8### Figure 8 ###reference_### illustrates that with the increase in the length of the cross-chain block segment, the likelihood of successful block cheating (referred to as the successful cheating probability) also increases. For instance, when is 2 (indicating two nodes in the producer blockchain for mining), the successful cheating probability ranges from 44.33% to 63.52%. However, the incremental gain in successful cheating probabilities becomes less pronounced as increases, primarily due to the already relatively small base probability. When reaches 2048, the successful cheating probability is observed to be in the range of 0.06% to 0.15%.\nNotably, the successful cheating probability changes with different trends. We observed a substantial reduction in the probability of successful cheating as increased from 2 to 128, followed by a more gradual decrease from 128 to 2048, where the total success probability remained relatively low. Specifically, when increased to 1024, the likelihood of successful block cheating did not exceed 6%.\nIn summary, the above results revealed several interesting points. We observed that the length of cross-chain block segments has a significant impact on the successful cheating probability, with longer segments leading to higher successful cheat probability. 
Additionally, as the number of nodes in the producer blockchain increased from 2 to 128, there was a substantial reduction in successful cheating probabilities. However, beyond 128 nodes, the decrease in success rates became more gradual."
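The experiment can be approximated with a short Monte-Carlo script. This is a sketch under stated assumptions: block times are drawn with random.expovariate(1/T) as described above, the $N$-node producer group is modeled as the minimum of $N$ exponential miners per block, and the adversary wins by finishing one block before the $L$-block segment completes. The paper's exact race conditions may differ, so the estimates need not match Table I; changing the win condition so the adversary must also complete $L$ blocks yields the rebranch experiment of the next subsection.

```python
import random

def cheat_success_prob(N: int, L: int, T: float = 10.0,
                       trials: int = 100_000) -> float:
    """Estimate P(adversary mines 1 block before N producer nodes mine L)."""
    wins = 0
    for _ in range(trials):
        # Fastest of N Exp(1/T) miners per block is Exp(N/T); sum over L blocks.
        producer_segment = sum(random.expovariate(N / T) for _ in range(L))
        adversary_block = random.expovariate(1 / T)   # single adversary node
        wins += adversary_block < producer_segment
    return wins / trials

if __name__ == "__main__":
    for N in (2, 8, 32, 128, 512, 2048):
        probs = [round(cheat_success_prob(N, L, trials=10_000), 4)
                 for L in range(2, 9)]
        print(N, probs)
```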
|
| 226 |
+
},
|
| 227 |
+
{
|
| 228 |
+
"section_id": "6.2.2",
|
| 229 |
+
"parent_section_id": "6.2",
|
| 230 |
+
"section_name": "VI-B2 Impact on Rebranch Probability",
|
| 231 |
+
"text": "In this section, we focus on investigating the impact of the length of cross-chain block segments on rebranch probabilities. The experimental setup is similar to the previous section, except that in Group B, the adversary node will mine the same number of blocks as that in Group A to trigger a rebranch.\nThe parameters in detail include:\nVarious lengths of cross-chain block segments () on rebranch probabilities, ranging from 2 to 8.\nDifferent node counts within the producer blockchain () in Group A, spanning from 2 to 2048, across various verification scenarios. Each scenario involves 100,000 simulations, and the results are illustrated in Figure 9 ###reference_###.\n###figure_9### Figure 9 ###reference_### illustrates that as the length of the cross-chain block segment increases, the probability of rebranching decreases. For instance, when is 2, the rebranch probability ranges from 31.50% to 27.63%. However, as increases, the reduction in the rebranch probability becomes less pronounced due to the relatively small base probability. For instance, when reaches 2048, the rebranch probability decreases from 0.03% to 0.01%. Detailed values can be found in Table II ###reference_###.\nThe findings from Figure 9 ###reference_### and table II ###reference_### emphasize that longer cross-chain block segments lead to a reduced probability of rebranching. Additionally, while an increase in results in a lower rebranch probability, the rate of reduction is less significant due to the initially small rebranch probability. These findings contribute to a better understanding of the dynamics of rebranching within blockchain networks."
|
| 232 |
+
},
|
| 233 |
+
{
|
| 234 |
+
"section_id": "7",
|
| 235 |
+
"parent_section_id": null,
|
| 236 |
+
"section_name": "VII Conclusion",
|
| 237 |
+
"text": "Conclusion: This paper addresses the critical challenges associated with the validation of cross-chain smart contract results. It introduces a novel method that involves executing the cross-chain smart contract to ensure the integrity of results, effectively mitigating the risks of fraudulent activities during cross-chain propagation. Moreover, the proposed approach emphasizes the significance of adopting a unified view for cross-chain smart contract results through a comprehensive confirmation process, incorporating proof of the chain of blocks and related data from the producer blockchain into the consumer blockchain. The verification results clearly demonstrate the feasibility of conducting cross-chain verification of results at the smart contract level, while also highlighting the manageable resource utilization associated with the proposed validation approach.\nFurther research can focus on optimizing synchronization protocols to enhance the efficiency and reliability of cross-chain data transmission. Exploring the integration of blockchain technology with emerging fields like Internet of Things (IoT) and decentralized finance (DeFi) can open up new avenues for cross-chain smart contract validation. Additionally, addressing scalability challenges and devising innovative solutions to enable seamless integration among multiple blockchain networks will be pivotal for fostering widespread adoption and interoperability across diverse blockchain ecosystems."
|
| 238 |
+
}
|
| 239 |
+
],
|
| 240 |
+
"appendix": [],
|
| 241 |
+
"tables": {
|
| 242 |
+
"1": {
|
| 243 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">TABLE I: </span>Successful block cheat ratio</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S6.T1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S6.T1.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T1.1.1.1.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S6.T1.1.1.1.1.1\">Nodes</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" colspan=\"7\" id=\"S6.T1.1.1.1.2\">Number of Blocks</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.2.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T1.1.2.2.1\">2</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T1.1.2.2.2\">3</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T1.1.2.2.3\">4</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T1.1.2.2.4\">5</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T1.1.2.2.5\">6</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T1.1.2.2.6\">7</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T1.1.2.2.7\">8</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T1.1.3.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T1.1.3.1.1\">2 nodes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.3.1.2\">0.44337</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.3.1.3\">0.50355</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.3.1.4\">0.54573</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.3.1.5\">0.57632</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.3.1.6\">0.6014</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.3.1.7\">0.62111</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.3.1.8\">0.63526</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.4.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T1.1.4.2.1\">8 nodes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.4.2.2\">0.16161</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.4.2.3\">0.19355</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.4.2.4\">0.21398</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.4.2.5\">0.22858</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.4.2.6\">0.24717</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.4.2.7\">0.25985</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.4.2.8\">0.26686</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.5.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T1.1.5.3.1\">32 nodes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" 
id=\"S6.T1.1.5.3.2\">0.04526</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.5.3.3\">0.05411</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.5.3.4\">0.06196</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.5.3.5\">0.06652</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.5.3.6\">0.07237</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.5.3.7\">0.07745</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.5.3.8\">0.08112</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.6.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T1.1.6.4.1\">128 nodes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.6.4.2\">0.01143</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.6.4.3\">0.01458</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.6.4.4\">0.01633</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.6.4.5\">0.01669</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.6.4.6\">0.0186</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.6.4.7\">0.01976</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.6.4.8\">0.02105</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.7.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T1.1.7.5.1\">512 nodes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.7.5.2\">0.00312</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.7.5.3\">0.00298</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.7.5.4\">0.00426</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.7.5.5\">0.0047</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.7.5.6\">0.00426</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.7.5.7\">0.00491</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T1.1.7.5.8\">0.0051</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T1.1.8.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T1.1.8.6.1\">2048 nodes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T1.1.8.6.2\">0.00067</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T1.1.8.6.3\">0.00084</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T1.1.8.6.4\">0.00094</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T1.1.8.6.5\">0.00109</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T1.1.8.6.6\">0.00129</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T1.1.8.6.7\">0.00129</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T1.1.8.6.8\">0.00154</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 244 |
+
"capture": "TABLE I: Successful block cheat ratio"
|
| 245 |
+
},
|
| 246 |
+
"2": {
|
| 247 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S6.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">TABLE II: </span>Rebranch Probabilities by different lengthd of cross-chain block segment</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S6.T2.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S6.T2.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T2.1.1.1.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S6.T2.1.1.1.1.1\">Nodes</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" colspan=\"7\" id=\"S6.T2.1.1.1.2\">Number of Blocks</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T2.1.2.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T2.1.2.2.1\">2</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T2.1.2.2.2\">3</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T2.1.2.2.3\">4</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T2.1.2.2.4\">5</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T2.1.2.2.5\">6</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T2.1.2.2.6\">7</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S6.T2.1.2.2.7\">8</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S6.T2.1.3.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T2.1.3.1.1\">2 nodes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.3.1.2\">0.31504</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.3.1.3\">0.30219</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.3.1.4\">0.2977</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.3.1.5\">0.28779</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.3.1.6\">0.28364</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.3.1.7\">0.27643</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.3.1.8\">0.27639</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T2.1.4.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T2.1.4.2.1\">8 nodes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.4.2.2\">0.09153</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.4.2.3\">0.08178</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.4.2.4\">0.07334</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.4.2.5\">0.06879</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.4.2.6\">0.06229</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.4.2.7\">0.05978</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.4.2.8\">0.05678</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T2.1.5.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T2.1.5.3.1\">32 nodes</td>\n<td class=\"ltx_td ltx_align_center 
ltx_border_r ltx_border_t\" id=\"S6.T2.1.5.3.2\">0.02308</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.5.3.3\">0.01861</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.5.3.4\">0.01666</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.5.3.5\">0.01555</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.5.3.6\">0.01422</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.5.3.7\">0.01240</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.5.3.8\">0.01162</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T2.1.6.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T2.1.6.4.1\">128 nodes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.6.4.2\">0.00582</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.6.4.3\">0.00485</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.6.4.4\">0.00436</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.6.4.5\">0.00367</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.6.4.6\">0.00338</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.6.4.7\">0.00291</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.6.4.8\">0.00291</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T2.1.7.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T2.1.7.5.1\">512 nodes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.7.5.2\">0.00161</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.7.5.3\">0.00123</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.7.5.4\">0.00111</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.7.5.5\">0.00101</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.7.5.6\">0.00079</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.7.5.7\">0.00073</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S6.T2.1.7.5.8\">0.00073</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S6.T2.1.8.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S6.T2.1.8.6.1\">2048 nodes</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T2.1.8.6.2\">0.00039</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T2.1.8.6.3\">0.00029</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T2.1.8.6.4\">0.00025</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T2.1.8.6.5\">0.00018</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T2.1.8.6.6\">0.00017</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T2.1.8.6.7\">0.00025</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S6.T2.1.8.6.8\">0.00016</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 248 |
+
"capture": "TABLE II: Rebranch Probabilities by different lengthd of cross-chain block segment"
|
| 249 |
+
}
|
| 250 |
+
},
|
| 251 |
+
"image_paths": {
|
| 252 |
+
"1": {
|
| 253 |
+
"figure_path": "2408.09962v1_figure_1.png",
|
| 254 |
+
"caption": "Figure 1: Cross-chain validation",
|
| 255 |
+
"url": "http://arxiv.org/html/2408.09962v1/extracted/5800209/crosschain_verification.png"
|
| 256 |
+
},
|
| 257 |
+
"2": {
|
| 258 |
+
"figure_path": "2408.09962v1_figure_2.png",
|
| 259 |
+
"caption": "Figure 2: The new block data structure with data of consumer blockchain",
|
| 260 |
+
"url": "http://arxiv.org/html/2408.09962v1/extracted/5800209/block_strcuture.png"
|
| 261 |
+
},
|
| 262 |
+
"3": {
|
| 263 |
+
"figure_path": "2408.09962v1_figure_3.png",
|
| 264 |
+
"caption": "Figure 3: The CPU occupation comparison.",
|
| 265 |
+
"url": "http://arxiv.org/html/2408.09962v1/extracted/5800209/resourceOccupationCPU.png"
|
| 266 |
+
},
|
| 267 |
+
"4": {
|
| 268 |
+
"figure_path": "2408.09962v1_figure_4.png",
|
| 269 |
+
"caption": "Figure 4: The average CPU occupation comparison.",
|
| 270 |
+
"url": "http://arxiv.org/html/2408.09962v1/extracted/5800209/resourceOccupationCPUAverage.png"
|
| 271 |
+
},
|
| 272 |
+
"5": {
|
| 273 |
+
"figure_path": "2408.09962v1_figure_5.png",
|
| 274 |
+
"caption": "Figure 5: The memory occupation comparison.",
|
| 275 |
+
"url": "http://arxiv.org/html/2408.09962v1/extracted/5800209/resourceOccupationMemory.png"
|
| 276 |
+
},
|
| 277 |
+
"6": {
|
| 278 |
+
"figure_path": "2408.09962v1_figure_6.png",
|
| 279 |
+
"caption": "Figure 6: The average memory occupation comparison.",
|
| 280 |
+
"url": "http://arxiv.org/html/2408.09962v1/extracted/5800209/resourceOccupationMemoryAverage.png"
|
| 281 |
+
},
|
| 282 |
+
"7": {
|
| 283 |
+
"figure_path": "2408.09962v1_figure_7.png",
|
| 284 |
+
"caption": "Figure 7: Resources occupation for nodes with shared blockchain data. Three nodes node 1, 2 and 3 shared the blockchain data. Node 3 store the cross-chain validation data for other two nodes.",
|
| 285 |
+
"url": "http://arxiv.org/html/2408.09962v1/extracted/5800209/sharedBlockchainData.png"
|
| 286 |
+
},
|
| 287 |
+
"8": {
|
| 288 |
+
"figure_path": "2408.09962v1_figure_8.png",
|
| 289 |
+
"caption": "Figure 8: Impact of different lengths on cross-chain block cheat",
|
| 290 |
+
"url": "http://arxiv.org/html/2408.09962v1/extracted/5800209/CompareMiningTimeByDifferentBlockListLength.png"
|
| 291 |
+
},
|
| 292 |
+
"9": {
|
| 293 |
+
"figure_path": "2408.09962v1_figure_9.png",
|
| 294 |
+
"caption": "Figure 9: Comparison of Rebranch Probabilities",
|
| 295 |
+
"url": "http://arxiv.org/html/2408.09962v1/extracted/5800209/CompareRebranchProbability.png"
|
| 296 |
+
}
|
| 297 |
+
},
|
| 298 |
+
"validation": true,
|
| 299 |
+
"references": [],
|
| 300 |
+
"url": "http://arxiv.org/html/2408.09962v1"
|
| 301 |
+
}
|
20240819/2408.09972v1.json
ADDED
|
@@ -0,0 +1,132 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Edge-Cloud Collaborative Motion Planning for Autonomous Driving with Large Language Models",
|
| 3 |
+
"abstract": "Integrating large language models (LLMs) into autonomous driving enhances personalization and adaptability in open-world scenarios. However, traditional edge computing models still face significant challenges in processing complex driving data, particularly regarding real-time performance and system efficiency. To address these challenges, this study introduces EC-Drive, a novel edge-cloud collaborative autonomous driving system with data drift detection capabilities. EC-Drive utilizes drift detection algorithms to selectively upload critical data, including new obstacles and traffic pattern changes, to the cloud for processing by GPT-4, while routine data is efficiently managed by smaller LLMs on edge devices. This approach not only reduces inference latency but also improves system efficiency by optimizing communication resource use. Experimental validation confirms the system\u2019s robust processing capabilities and practical applicability in real-world driving conditions, demonstrating the effectiveness of this edge-cloud collaboration framework. Our data and system demonstration will be released at https://sites.google.com/view/ec-drive.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "As intelligent transportation and autonomous driving technologies rapidly advance, the motion planning system, as a critical component, faces increasingly complex environments and diverse challenges. Traditional motion planning methods often rely on fixed algorithms and models, making it difficult to fully address the dynamic changes in traffic conditions and the personalized needs of drivers [1 ###reference_b1###].\nIntegrating large language models (LLMs) into autonomous vehicles not only enables artificial intelligence systems to control the driving process but also significantly enhances the system\u2019s personalization and adaptability. By understanding natural language commands, LLMs can dynamically adjust driving strategies to meet the personalized preferences of drivers or passengers, thereby improving the overall driving experience. Moreover, the integration of LLMs allows autonomous systems to better handle complex and dynamic open-world scenarios, making them more flexible in addressing diverse driving tasks.\nThe Transformer, originally designed for sequential data, has achieved state-of-the-art performance in natural language processing, driving the development of LLMs [2 ###reference_b2###, 3 ###reference_b3###].\nThese models pretrain Transformer architectures (encoder, encoder-decoder, and decoder) on vast corpora to capture extensive language statistics. Pretrained LLMs can be fine-tuned for specialized downstream tasks.\nThe Vision Transformer (ViT) [4 ###reference_b4###] applies the Transformer to image tasks, converting images into sequences of patches that the Transformer can process. CLIP [5 ###reference_b5###], a multimodal model that matches textual descriptions with images, demonstrates strong transfer capabilities in many image classification tasks. Utilizing pretrained LLMs as a framework for multimodal tasks leverages their text generation capabilities, which is crucial for the question-answering tasks in our research. However, despite their impressive performance in many tasks, deploying these large models with typically over a billion parameters for real-time applications remains challenging.\n###figure_1### Although autonomous driving systems primarily rely on visual features, incorporating linguistic features can enhance system interpretability and aid in identifying new traffic situations. This advantage has sparked interest in integrating multimodal data to train language models as autonomous driving agents.\nDriveGPT4 [6 ###reference_b6###] employs LLaMA as the backbone LLM, with CLIP as the visual encoder, using traffic scene videos and prompt texts as inputs to generate responses and low-level vehicle control signals.\nDriveMLM [7 ###reference_b7###] utilizes multi-view images, LiDAR point clouds, traffic rules, and user instructions from a real simulator to perform closed-loop driving. This multimodal model is constructed with LLaMA and ViT as the image processor.\nGPT-Driver [8 ###reference_b8###] reframes motion planning as a language modeling task, using GPT-3.5 to represent the planner\u2019s inputs and outputs as language tokens.\nHowever, these models utilize LLMs with over a billion parameters (such as GPT-3.5 [2 ###reference_b2###] and LLaMA [3 ###reference_b3###]) and expensive image encoders (such as CLIP and ViT), making them suitable mainly for latency-insensitive offline scenarios rather than latency-critical online scenarios. 
Recently, collaboration between large and small language models has garnered significant attention [9 ###reference_b9###]. Inspired by dual-process cognitive theory, various methods can be integrated into a unified framework.\nOur primary insight is to use data drift detection algorithms to upload a small number of difficult samples (e.g., new obstacles, changes in traffic patterns) to the cloud for processing by larger-scale models (e.g., GPT-4), while most samples are handled by smaller parameter LLMs at the edge. This approach, illustrated in Fig. 1 ###reference_###, ensures low inference latency while improving the handling of dynamic environments. This method has potential applications in remote assistance for autonomous vehicles, enabling them to navigate complex and evolving scenarios more effectively. Our main contributions are as follows:\nWe propose a novel edge-cloud collaborative autonomous driving system, EC-Drive, equipped with data drift detection capabilities. This efficient framework utilizes data drift detection algorithms to selectively upload a small number of challenging samples (e.g., new obstacles, changes in traffic patterns) to the cloud for processing by GPT-4, while most of the data is managed by smaller parameter LLMs on edge devices. This approach ensures low inference latency while effectively addressing the challenges of complex environments.\nWe introduce a multimodal approach that integrates linguistic features with traditional visual data, enhancing the interpretability and decision-making capabilities of autonomous driving systems. This integration allows the system to better understand and respond to new traffic situations, improving adaptability and safety.\nDetailed experimental validation demonstrates the system\u2019s robust processing capabilities and its potential applicability in real-world driving scenarios, highlighting the practical advantages and feasibility of the proposed edge-cloud collaborative framework."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "II Related Works",
|
| 15 |
+
"text": "This section reviews motion planning methods and the practical application of LLMs in autonomous driving, focusing on their strengths and challenges in complex traffic environments."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "II-A Motion Planning in Autonomous Driving",
|
| 21 |
+
"text": "Autonomous driving utilizes various motion planning strategies for efficient vehicle navigation.\n(1) Rule-based method: This approach generates paths based on predefined rules that account for environmental constraints like road geometry and traffic signals [10 ###reference_b10###]. While simple and efficient, it is rigid and struggles to adapt to unexpected changes.\n(2) Optimization-based method: Optimization algorithms compute optimal trajectories by minimizing a cost function considering factors such as time, energy, safety, and comfort [1 ###reference_b1###]. Though precise, these methods are computationally intensive and may not suit real-time decision-making.\n(3) Learning-based method: This approach uses machine learning to adapt to dynamic environments by learning from past data [11 ###reference_b11###]. Deep neural networks and reinforcement learning provide adaptability but require significant data and resources, often struggling with rare or novel scenarios."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "II-B Large Models",
|
| 27 |
+
"text": "Large models (LMs) based on the Transformer, such as Large Language Models [3 ###reference_b3###, 2 ###reference_b2###], vision models [4 ###reference_b4###, 5 ###reference_b5###], time series models [12 ###reference_b12###, 13 ###reference_b13###], and multimodal models [14 ###reference_b14###], have gained widespread attention due to their unique advantages. With billions to trillions of parameters, these models accumulate extensive knowledge through pre-trained on large datasets, significantly advancing the automation and diversification of data processing while reducing reliance on human expertise. Such capabilities have attracted broad interest in the industrial sector, fostering numerous studies targeting industrial intelligence.\nThe collaboration between large and small language models garners considerable attention. Inspired by dual-process cognitive theory, various methods can be integrated into a unified framework. Research indicates that the essential difference between large and small models lies in the control of uncertainty in next token predictions during the decoding process, and it highlights that collaborative interactions between models are most critical at the beginning of the generation process [9 ###reference_b9###]."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.3",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "II-C Motion Planning with LLMs.",
|
| 33 |
+
"text": "In recent years, significant progress has been made in the application of LLMs in the field of autonomous driving.\nUtilizing LLMs to enhance decision-making processes in autonomous vehicles has the potential to transform their operational methods. This approach offers personalized assistance, facilitates continuous learning, and improves decision intelligence [15 ###reference_b15###].\nPlanAgent[16 ###reference_b16###] is a multimodal large language model-based autonomous motion planning agent system that enhances environmental understanding through Bird\u2019s Eye View (BEV) and lane-graph-based textual descriptions. It introduces a hierarchical Chain of Thought (CoT) [17 ###reference_b17###] to guide the MLLM in generating planner code.\nHu et al. [18 ###reference_b18###] propose an LLM-driven collaborative driving framework for multiple vehicles, featuring lifelong learning capabilities. It allows different driving agents to communicate with each other, facilitating collaborative driving in complex traffic scenarios.\nDiLu [19 ###reference_b19###] is the first framework to leverage knowledge-driven capabilities in autonomous driving decision-making. It combines reasoning and reflection modules, enhancing the capabilities of LLMs, enabling them to apply knowledge and perform causal reasoning in the autonomous driving domain.\nTrafficGPT [20 ###reference_b20###] reveals the application potential of large language models in the smart transportation domain. These models possess the capability to view and process traffic data, providing profound decision support for urban traffic system management. Additionally, they assist in human decision-making during traffic control, demonstrating their practicality and efficacy in traffic management.\nLimSim++ [21 ###reference_b21###] is an open-source evaluation platform specifically designed for the research of autonomous driving with LVLMs, supporting scenario understanding, decision-making, and evaluation.\nDriveLM [22 ###reference_b22###] introduces datasets using nuScenes and CARLA, presenting a vision-language models based baseline approach that concurrently addresses Graph visual question answering and end-to-end driving. The experiments showcased Graph visual question answering as a simple and principled framework for scene reasoning.\nCODA-LM [23 ###reference_b23###] demonstrates that even the most advanced autonomous driving perception systems struggle with handling complex road corner cases.\n###figure_2###"
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "III Edge-Cloud Collaborative Motion Planning for Autonomous Driving",
|
| 39 |
+
"text": "In this section, we elaborate on the methodologies and technologies employed in the EC-Drive system, emphasizing the use of edge and cloud models as well as the collaborative process between them. This approach ensures efficient and safe decision-making, even in complex driving environments."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.1",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "III-A Problem Statement",
|
| 45 |
+
"text": "In edge-cloud collaborative intelligent driving systems, we deploy small-scale LLMs on edge devices for real-time motion planning and large-scale LLMs on the cloud to provide efficient support. Edge devices, when processing real-time driving data, may encounter distribution shifts or decreased model confidence due to natural variations (such as changes in lighting or weather) or sensor degradation, which can affect model performance.\nTwo primary scenarios necessitate the request for support from large models in the cloud:\n(1) When the vehicle encounters new or previously unseen objects or situations, increasing decision-making complexity, and the edge model may be insufficient for accurate inference.\n(2) For instance, visual obstructions or lighting variations may reduce the accuracy and reliability of edge model predictions. Under such circumstances, leveraging large models in the cloud for deeper analysis can enhance system performance and safety.\n###figure_3### ###figure_4###"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.2",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "III-B System Architecture",
|
| 51 |
+
"text": "The proposed system architecture, illustrated in Fig. 2 ###reference_###, integrates edge and cloud components to enhance the overall performance of autonomous driving systems. The vehicle employs small-scale LLMs, fine-tuned using instruction-based approaches as shown in Fig. 3 ###reference_###, to manage routine driving tasks and process real-time sensor data for immediate decision-making.\nEdge Models:\nWe employ LLaMA-Adapter [24 ###reference_b24###], a parameter-efficient tuning mechanism based on the LLaMA language model. LLaMA-Adapter is specifically designed for scenarios where computational resources are constrained, such as autonomous driving. It introduces small, zero-initialized attention modules, which are fine-tuned to adapt to new tasks without modifying the entire pre-trained model. This approach minimizes the additional computational overhead, making it ideal for real-time motion planning on edge devices. The model processes real-time sensor data, including text, vision and LiDAR inputs, to make preliminary driving decisions under normal conditions. Pre-print, manuscript submitted to IEEE.\nCloud Models: In the cloud, large-scale LLMs such as GPT-4 offer advanced computational power for handling more complex and dynamic driving scenarios. Real-time data from various onboard sensors, including cameras, LiDAR, and radar, is collected and preprocessed to extract pertinent features. This preprocessing converts the raw sensor data into a structured format that is amenable to model inference. The processed data is then input into the edge model for initial inference, facilitating efficient and timely driving decisions under varying conditions.\nEdge-Cloud Collaboration Workflow: Inspired by [25 ###reference_b25###], we utilize the Alibi Detect library [26 ###reference_b26###] to monitor edge model performance. If anomalies or low-confidence predictions are detected, the system flags those instances and uploads the data to the cloud. The cloud model then performs detailed inference to generate optimized decisions, which are integrated with the edge model\u2019s outputs to update the vehicle\u2019s driving plan, ensuring safe and efficient operation.\nLet x denote the preprocessed driving data, and the inference result of the edge model is , where represents the edge model. We use the Alibi Detect library to perform anomaly detection. If the prediction result indicates the presence of data drift or low confidence, cloud model support is requested, resulting in an enhanced decision , where represents the cloud model. The overall process is shown in Algorithm 1 ###reference_###."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "IV Experiments",
|
| 57 |
+
"text": "In this section, we present experimental investigations into the real-time operational capabilities of autonomous driving systems using LLMs under different computational paradigms: Edge, Cloud, and Edge-Cloud Collaborative scenarios. Each subsection details distinct approaches and methodologies\u2014ranging from handling in-vehicle data processing at the edge to leveraging cloud computational power for intensive data analysis and decision-making. This comparative study aims to highlight the efficiency, scalability, and reliability of each model under varied driving conditions and their implications on autonomous driving technologies."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.1",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "IV-A Driving on Edge",
|
| 63 |
+
"text": "Scene Description:\nWe transcribe the current driving scene into descriptive text, including the current speed, acceleration, position of the ego vehicle, and information about surrounding vehicles. For example, the ego vehicle is traveling in the rightmost lane of a four-lane road at a speed of 25.0 m/s, with an acceleration of 0.0 m/s , and its lane position is 361.18 m. The information for other vehicles includes their speed, acceleration, and relative position, such as vehicle 496 in the left lane, ahead by 372.81 m, traveling at a speed of 21.2 m/s, with an acceleration of 0.2 m/s .\nReasoning and Thinking:\nThe scene description is embedded into vectors and input into the LLaMA-Adapter. Using CoT techniques, LLaMA-Adapter generates sequential reasoning logic and performs step-by-step logical reasoning. For instance, it first assesses whether the vehicle can accelerate. If not, it evaluates the safety of maintaining the current speed. If necessary, it further evaluates the possibility and safety of changing lanes.\nDecision Making:\nAs shown in Fig. 2 ###reference_###, the system decodes the final decision from the LLM\u2019s response and translates it into corresponding vehicle actions, following the outlined process.\nAs shown in Fig 4 ###reference_###, we demonstrate how the LLM performs step-by-step logical reasoning and decision-making in a complex traffic environment."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.2",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "IV-B Driving on Cloud",
|
| 69 |
+
"text": "As depicted in Fig. 5 ###reference_###, edge models face significant challenges in real incremental scenarios. Through the identification module, the system selectively uploads data to the cloud-based foundational model, powered by GPT-4, for queries, thereby enhancing motion planning performance. The inference process of the cloud model in real scenarios encompasses three critical stages: perception, prediction, and planning. These stages are essential for ensuring the model\u2019s efficient response.\n###figure_5###"
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.3",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "IV-C Edge-Cloud Collaborative Motion Planning",
|
| 75 |
+
"text": "This project utilizes data collected by autonomous vehicles at the Guangzhou International Campus of South China University of Technology as the testing benchmark. The dataset comprises images captured from the perspective of autonomous vehicles, with a lower camera angle that aligns closely with practical autonomous driving applications such as delivery and patrol.\nFig. 6 ###reference_### illustrates the inference outcomes of different models in the same scenario. In most cases, edge models (LLaMA-Adapter [24 ###reference_b24###]) demonstrate performance comparable to cloud models (GPT-4 [2 ###reference_b2###]), where invoking cloud models offers limited improvement to the driving task and may lead to resource wastage and unnecessary delays.\nAlthough the edge model is capable of making quick inferences in most cases, the cloud model demonstrates extremely high accuracy when dealing with complex scenarios. For instance, in pedestrian recognition and complex road planning (as shown in the second case of Fig. 6 ###reference_###), the cloud model can correct the inference errors made by the edge model, thereby enhancing the overall safety and reliability of the system.\n###figure_6###"
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.4",
|
| 79 |
+
"parent_section_id": "4",
|
| 80 |
+
"section_name": "IV-D Further Analysis and Discussion",
|
| 81 |
+
"text": "Inspired by [27 ###reference_b27###], we evaluate LLMs based on three metrics.\nGaze: Assessing the accuracy of LLMs in identifying areas of focus during the driving process. Scene Understanding: Evaluating the precision of LLMs in describing elements present in the current driving scene. Logic: Analyzing the correctness of the reasoning employed by LLMs in making driving decisions.\nTab. I ###reference_### presents the inference results of models of varying sizes within the dataset. The performance of cloud-based LLMs significantly surpasses that of edge-based small-scale models: As shown in the table, cloud-based LLMs (such as GPT-4 and GPT-4o) achieve higher scores across all three metrics (Gaze, Scene Understanding, and Logic) compared to edge-based small-scale models. Specifically, GPT-4 scores 87.1 in Gaze and 88.9 in Scene Understanding, significantly outperforming the highest scores of edge-based models, which are 66.8 and 59.4, respectively.\nEdge-based small-scale LLMs exhibit advantages in specific scenarios: Despite the superior overall performance of cloud-based LLMs, edge-based small-scale models demonstrate significant benefits in environments with limited computational resources or where low-latency responses are required. For instance, edge-based models such as Phi-2-2.7B and TinyLlama-1.1B provide relatively stable performance under constrained resources."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "5",
|
| 85 |
+
"parent_section_id": null,
|
| 86 |
+
"section_name": "Conclusion",
|
| 87 |
+
"text": "This study extensively investigates the application performance of LLMs in autonomous driving systems, leveraging edge computing, cloud computing, and edge-cloud collaborative processing. In the edge computing environment, the system swiftly processes real-time driving data, utilizing CoT for logical inference and decision-making. The cloud model exhibits exceptional perception, prediction, and planning capabilities when handling complex driving scenarios. Notably, the edge-cloud collaboration selectively uploads critical data to the cloud, not only enhancing inference speed and conserving communication resources but also significantly reducing system latency. This collaboration also markedly improves the edge model\u2019s understanding of incremental and complex scenarios, thereby enhancing overall system performance in motion planning.\nThe experimental results validate the effectiveness and efficiency of the model in practical applications.\nThese findings provide crucial theoretical and practical guidance for the future development of autonomous driving technologies."
|
| 88 |
+
}
|
| 89 |
+
],
|
| 90 |
+
"appendix": [],
|
| 91 |
+
"tables": {
|
| 92 |
+
"1": {
|
| 93 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S4.T1.5.1.1\" style=\"font-size:90%;\">TABLE I</span>: </span><span class=\"ltx_text\" id=\"S4.T1.6.2\" style=\"font-size:90%;\">Performance comparison of edge and cloud models in autonomous driving, focusing on relevant driving metrics.</span></figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S4.T1.3\" style=\"width:433.6pt;height:209.2pt;vertical-align:-9.7pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-4.3pt,2.0pt) scale(0.980761486071569,0.980761486071569) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T1.3.3\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T1.3.3.3\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"S4.T1.3.3.3.4\">Type</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"S4.T1.3.3.3.5\">LLM</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.1.1.1.1\">Gaze ()</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.2.2.2.2\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T1.2.2.2.2.1\">\n<tr class=\"ltx_tr\" id=\"S4.T1.2.2.2.2.1.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.2.2.2.2.1.2.1\">Scene</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.2.2.2.2.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.2.2.2.2.1.1.1\">Understanding ()</td>\n</tr>\n</table>\n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.3.3.3.3\">Logic ()</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T1.3.3.4.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_tt\" id=\"S4.T1.3.3.4.1.1\">Edge</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_tt\" id=\"S4.T1.3.3.4.1.2\">Moondream</th>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T1.3.3.4.1.3\">54.7</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T1.3.3.4.1.4\">52.6</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S4.T1.3.3.4.1.5\">49.6</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.3.3.5.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.5.2.1\">Edge</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.5.2.2\">OpenELM-450M <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09972v1#bib.bib28\" title=\"\">28</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.5.2.3\">59.5</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.5.2.4\">52.1</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.5.2.5\">50.5</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.3.3.6.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.6.3.1\">Edge</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.6.3.2\">TinyLlama-1.1B <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.09972v1#bib.bib29\" title=\"\">29</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.6.3.3\">61.7</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.6.3.4\">53.9</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.6.3.5\">54.1</td>\n</tr>\n<tr 
class=\"ltx_tr\" id=\"S4.T1.3.3.7.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.7.4.1\">Edge</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.7.4.2\">Gemma-2B</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.7.4.3\">65.5</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.7.4.4\">58.6</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.7.4.5\">59.9</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.3.3.8.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.8.5.1\">Edge</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.8.5.2\">Phi-2-2.7B</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.8.5.3\">66.8</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.8.5.4\">59.4</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.8.5.5\">61.2</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.3.3.9.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.9.6.1\">Cloud</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.9.6.2\">LLava-7B</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.9.6.3\">72.3</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.9.6.4\">74.2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.9.6.5\">61.5</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.3.3.10.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.10.7.1\">Cloud</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.10.7.2\">LLama-Adapter</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.10.7.3\">75.1</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.10.7.4\">79.4</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.10.7.5\">69.6</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.3.3.11.8\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.11.8.1\">Cloud</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.3.3.11.8.2\">GPT-4o</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.11.8.3\">85.3</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.11.8.4\">86.5</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.3.3.11.8.5\">80.3</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.3.3.12.9\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.T1.3.3.12.9.1\">Cloud</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.T1.3.3.12.9.2\">GPT-4</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.3.3.12.9.3\">87.1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.3.3.12.9.4\">88.9</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.3.3.12.9.5\">81.6</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 94 |
+
"capture": "TABLE I: Performance comparison of edge and cloud models in autonomous driving, focusing on relevant driving metrics."
|
| 95 |
+
}
|
| 96 |
+
},
|
| 97 |
+
"image_paths": {
|
| 98 |
+
"1": {
|
| 99 |
+
"figure_path": "2408.09972v1_figure_1.png",
|
| 100 |
+
"caption": "Figure 1: Architecture of the EC-Drive system. LLM-based motion planning is performed on edge devices within the vehicle, while complex inference tasks are offloaded to the cloud, which has larger models and more extensive resources.",
|
| 101 |
+
"url": "http://arxiv.org/html/2408.09972v1/x1.png"
|
| 102 |
+
},
|
| 103 |
+
"2": {
|
| 104 |
+
"figure_path": "2408.09972v1_figure_2.png",
|
| 105 |
+
"caption": "Figure 2: Motion planning process on the edge through large language models, utilizing vision and LiDAR data for real-time decision-making and execution. ROS stands for Robot Operating System, which is used to execute actions and provide feedback on the execution results.",
|
| 106 |
+
"url": "http://arxiv.org/html/2408.09972v1/x2.png"
|
| 107 |
+
},
|
| 108 |
+
"3": {
|
| 109 |
+
"figure_path": "2408.09972v1_figure_3.png",
|
| 110 |
+
"caption": "Figure 3: Instruction tuning of pretrained LLaMA2 models for autonomous driving, using multi-view images and prompt for efficient adaptation to specific driving scenarios.",
|
| 111 |
+
"url": "http://arxiv.org/html/2408.09972v1/x3.png"
|
| 112 |
+
},
|
| 113 |
+
"4": {
|
| 114 |
+
"figure_path": "2408.09972v1_figure_4.png",
|
| 115 |
+
"caption": "Figure 4: Edge model performs step-by-step reasoning and decision making in a complex traffic environment",
|
| 116 |
+
"url": "http://arxiv.org/html/2408.09972v1/x4.png"
|
| 117 |
+
},
|
| 118 |
+
"5": {
|
| 119 |
+
"figure_path": "2408.09972v1_figure_5.png",
|
| 120 |
+
"caption": "Figure 5: The cloud model addresses incremental driving scenarios, and the yellow dotted line shows the logical dependencies between stages.",
|
| 121 |
+
"url": "http://arxiv.org/html/2408.09972v1/x5.png"
|
| 122 |
+
},
|
| 123 |
+
"6": {
|
| 124 |
+
"figure_path": "2408.09972v1_figure_6.png",
|
| 125 |
+
"caption": "Figure 6: Comparison of inference results between edge and cloud models in the same scenario.",
|
| 126 |
+
"url": "http://arxiv.org/html/2408.09972v1/x6.png"
|
| 127 |
+
}
|
| 128 |
+
},
|
| 129 |
+
"validation": true,
|
| 130 |
+
"references": [],
|
| 131 |
+
"url": "http://arxiv.org/html/2408.09972v1"
|
| 132 |
+
}
|
20240819/2408.10002v1.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20240819/2408.10043v1.json
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Stacked Intelligent Metasurfaces for Integrated Sensing and Communications",
|
| 3 |
+
"abstract": "Stacked intelligent metasurfaces (SIM) have recently emerged as a promising technology, which can realize transmit precoding in the wave domain. In this paper, we investigate a SIM-aided integrated sensing and communications system, in which SIM is capable of generating a desired beam pattern for simultaneously communicating with multiple downlink users and detecting a radar target. Specifically, we formulate an optimization problem of maximizing the spectrum efficiency, while satisfying the power constraint of the desired direction. This requires jointly designing the phase shifts of the SIM and the power allocation at the base station. By incorporating the sensing power constraint into the objective functions as a penalty term, we further simplify the optimization problem and solve it by customizing an efficient gradient ascent algorithm. Finally, extensive numerical results demonstrate the effectiveness of the proposed wave-domain precoder for automatically mitigating the inter-user interference and generating a desired beampattern for the sensing task, as multiple separate data streams transmit through the SIM.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "With the rapid development of emerging intelligent services such as intelligent transportation, drones, and the Internet of Things, next-generation wireless networks not only need to support stringent communication performance such as high transmission rate and low latency, but also to provide high-accuracy sensing service, such as detection, localization, and tracking[1 ###reference_b1###]. Therefore, integrated sensing and communications (ISAC) is considered to be one of the core features of the sixth-generation wireless network.\nIn ISAC systems, radar and communication devices need to share the same hardware and waveform. Additionally, the constraints introduced by sensing capabilities will limit the degrees of freedom in waveform design[1 ###reference_b1###]. To solve this, significant efforts have been made focusing on the unified beamforming design [2 ###reference_b2###, 3 ###reference_b3###, 4 ###reference_b4###, 5 ###reference_b5###, 6 ###reference_b6###]. While these ISAC schemes achieve favorable performance trade-offs between sensing and communications, they generally rely on digital beamforming. When an extremely large aperture array is utilized, the conventional fully digital architecture typically requires a large number of radio frequency (RF) chains, resulting in high hardware costs. Although hybrid beamforming schemes could lower the hardware cost, the constant-modulus constraint of the analog component reduces the degrees of freedom in waveform design, which would lead to a performance penalty compared to the digital schemes assigning each antenna with an RF chain. To address this challenge, stacked intelligent metasurfaces (SIM) have been proposed to enable wave-based beamforming, which can achieve full-precision digital beamforming while reducing the hardware cost[7 ###reference_b7###]. Thus, SIM has the potential to replace traditional digital beamforming [8 ###reference_b8###].\nSpecifically, SIM consists of multiple layers of programmable metasurfaces with a structure similar to an artificial neural network [9 ###reference_b9###, 10 ###reference_b10###, 11 ###reference_b11###]. By appropriately designing its hardware structure and optimizing the transmission coefficients of meta-atoms, SIM can carry out advanced signal processing directly in the native electromagnetic (EM) wave regime, such as matrix operations[12 ###reference_b12###, 7 ###reference_b7###]. This is in contrast to conventional single-layer reconfigurable intelligent surfaces (RIS), which are typically utilized to reshape wireless propagation environments [13 ###reference_b13###, 5 ###reference_b5###].\nTo elaborate, An et al. [7 ###reference_b7###] proposed a novel SIM-aided holographic MIMO framework, which accomplishes transmit precoding and reception combining automatically as the EM waves pass through the SIM. To further exploit the potential of SIM, the authors of [14 ###reference_b14###] applied SIM to the downlink of a multi-user MISO system where the interference between user equipments (UEs) is efficiently suppressed. Following this, substantial works examine the application of the SIM in practical wireless systems, such as the SIM-aided channel modeling [15 ###reference_b15###] and estimation[16 ###reference_b16###], hybrid digital and wave-based architecture[17 ###reference_b17###], etc. 
It was demonstrated that by leveraging multiple metasurface layers, SIM possesses powerful computing capability and outperforms its single-layer counterparts [7 ###reference_b7###, 14 ###reference_b14###, 18 ###reference_b18###, 19 ###reference_b19###, 20 ###reference_b20###, 13 ###reference_b13###, 21 ###reference_b21###, 22 ###reference_b22###]. Besides, by modeling the input layer of SIM as a uniform planar array (UPA) and training its multilayer architecture, SIM can perform the two-dimensional discrete Fourier transform and estimate the direction-of-arrival of the target [23 ###reference_b23###]. Based on these observations, SIM has the potential to generate dual-functional beampatterns for ISAC applications, which, however, has not been studied hitherto.\nAgainst this background, we present a SIM-aided ISAC system in this paper, focusing particularly on maximizing the spectrum efficiency (SE) of downlink communication systems while directing a beam towards a single target of interest. In contrast to conventional digital precoding schemes, a SIM is capable of generating multiple beams towards a radar target and communication users with minimized interference, so as to achieve dual-functional integration of communication and sensing relying on wave-based computation.\nNotations: Bold lowercase and uppercase letters denote vectors and matrices, respectively; $(\\cdot)^{H}$ represents the Hermitian transpose; $\\Im\\{z\\}$ denotes the imaginary part of a complex number $z$; $\\|\\cdot\\|_{F}$ is the Frobenius norm; $\\mathbb{E}\\{\\cdot\\}$ stands for the expectation operation; $\\log_{2}(\\cdot)$ is the logarithmic function with base $2$; $\\mathrm{diag}(\\mathbf{a})$ produces a diagonal matrix with the elements of $\\mathbf{a}$ on the main diagonal; $\\mathrm{sinc}(x)=\\sin(\\pi x)/(\\pi x)$ is the sinc function; $\\mathbb{C}^{M\\times N}$ represents the space of $M\\times N$ complex-valued matrices; the distribution of a circularly symmetric complex Gaussian random vector with mean vector $\\boldsymbol{\\mu}$ and covariance matrix $\\boldsymbol{\\Sigma}$ is expressed as $\\mathcal{CN}(\\boldsymbol{\\mu},\\boldsymbol{\\Sigma})$, where $\\sim$ stands for \u201cdistributed as\u201d; $\\mathbf{I}_{N}$ denotes the $N\\times N$ identity matrix."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "II System Model",
|
| 15 |
+
"text": ""
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "II-A SIM Model",
|
| 21 |
+
"text": "Fig. 1 illustrates the proposed ISAC system, where a SIM serves as the radome of the base station (BS), facilitating the shared use of all BS antennas for both downlink communication to K UEs and radar sensing of a target of interest. Additionally, we highlight that in practice the communication signal is generally known by the transmitter and collaborative receiver. Hence, the communication signal is also employed as the radar probing waveform to enable dual function[2 ###reference_b2###]. Note that in contrast to conventional systems, SIM carries out ISAC beamforming in the wave domain for efficiently eliminating the multi-user interference and generating a desired beam.\nThe SIM consists of evenly distributed metasurface layers, with each layer comprising meta-atoms[7 ###reference_b7###]. These meta-atoms can manipulate the EM behavior, as the waves pass through them. In the proposed system, each data stream is sent separately from a different antenna to the UE, and beamforming is performed automatically as information-carrying EM waves propagate through the SIM, eliminating the need for the digital beamformer. However, the number of antennas typically differs from the number of UEs, which needs antenna selection in practice[19 ###reference_b19###]. Since we are focusing on wave-domain beamforming for ISAC, we assume that both the number of antennas and the number of users are for simplicity. Let represent the sets of metasurfaces, meta-atoms on each layer, and UEs, respectively. The diagonal phase shift matrix of the -th metasurface layer can be written as\nwhere \ndenotes the EM response of the -th meta-atom on the -th layer, and is the corresponding phase shift. We can connect these meta-atoms to a smart controller and produce a customized spatial waveform shape at the output of the metasurface layer by subtly configuring the SIM phase shifts[24 ###reference_b24###, 12 ###reference_b12###].\nMoreover, the transmission matrix from the -st to the -th metasurface layer can be written as . According to the Rayleigh-Sommerfeld diffraction theory, the -th entry of is given by , where is the wavelength, is the area of each meta-atom in the SIM, represents the angle between the propagation direction and the normal direction of the -st transmit metasurface layer, while denotes the corresponding propagation distance. In particular, we define , and represents the transmission vector from the -th antenna to the first metasurface layer of the SIM. Similarly, the -th entry of is obtained by replacing and in the preceding expression with and , respectively. As a result, the transfer function of SIM is formulated by [7 ###reference_b7###, 24 ###reference_b24###, 14 ###reference_b14###, 25 ###reference_b25###]\n###figure_1###"
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "II-B Signal Model",
|
| 27 |
+
"text": "We denote , where , i.e., the -th entry of , represents the information symbol intended for UE , and assume that they are mutually independent such that [2 ###reference_b2###].\nThe transmission signal vector from the output layer of the SIM is given by\nwhere characterizes the transfer function from the antenna array to the output layer of the SIM, represents the power allocation for UEs. Thus, the waveform covariance matrix is written as ."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.3",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "II-C Communication Model",
|
| 33 |
+
"text": "We define , where is the propagation channel from the SIM to the -th user [14 ###reference_b14###].\nWe assume that the channel between the SIM and UEs is flat Rayleigh fading, i.e.,\nwhere has been perfectly estimated by utilizing uplink pilot signals [17 ###reference_b17###], and represents the distance-dependent path loss for the communication link between SIM and the -th UE. is modeled as ,\nwhere denotes the propagation distance between the SIM and the -th UE. Additionally, the free space path loss at a reference distance of m is represented by , and represents the path loss exponent. Additionally, quantifies the spatial correlation among the channels associated with different meta-atoms on the final layer. When considering an isotropic scattering environment with uniformly distributed multipath components, is expressed by , where denotes the corresponding meta-atom spacing[26 ###reference_b26###]. Thus, the signal received by the -th user can be expressed as\nwhere represents additive white Gaussian noise (AWGN) and is the average noise power at UE ."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "2.4",
|
| 37 |
+
"parent_section_id": "2",
|
| 38 |
+
"section_name": "II-D Sensing Model",
|
| 39 |
+
"text": "Specifically, the output layer of the SIM is modeled as a UPA with antenna spacing of . By properly configuring the phase shifts of meta-atoms in each layer, the SIM can emit a concentrated beam toward the target\u2019s direction. The channel between the SIM and the radar\u2019s targets is modeled as a line-of-sight propagation channel[2 ###reference_b2###]. As shown in Fig. 1 ###reference_###, we consider a far-field target with the azimuth angle of and the elevation angle of . and denote the number of meta-atoms in the -axis and -axis, respectively. The steering vector of the SIM is derived as[23 ###reference_b23###]\nwhere we have . Besides, and are the steering vector of -axis and -axis, respectively.\nAssuming that the radar target is in the direction of , the beampattern gain directed towards the target is written as [3 ###reference_b3###].\nIn this paper, we aim to design the SIM response to make sure that meets the desired value for the sensing task\u2019s requirement."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "III Problem Formulation and Solution",
|
| 45 |
+
"text": ""
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.1",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "III-A Problem Formulation",
|
| 51 |
+
"text": "In this subsection, we formulate an optimization problem of maximizing the SE by jointly optimizing the power allocation coefficients at the BS and the phase shifts of the SIM, subject to the total power and desired beampattern constraints. Specifically, the optimization problem is formulated as\nwhere denotes the desired beampattern gain at the target\u2019s direction, and is the signal-to-interference-plus-noise ratio of UE . In order to simplify the problem in (7 ###reference_###), we transform the beamforming pattern constraint as a penalty term to the objective function. Then the problem can be reformulated as follows:\nwhere in the second term represents the penalty factor for striking a flexible tradeoff between the sum rate and the desired beampattern gain. Then, the optimal solution of the constrained problem (7 ###reference_###) is approached by solving problem (8 ###reference_###)."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.2",
|
| 55 |
+
"parent_section_id": "3",
|
| 56 |
+
"section_name": "III-B The Proposed Gradient Ascent Algorithm",
|
| 57 |
+
"text": "To address problem (8 ###reference_###), we propose an efficient gradient ascent algorithm to iteratively adjust the phase shifts of the SIM and the power allocation coefficients until reaching convergence. The specific steps of the algorithm are outlined as follows:\nStep 1: Initialize the phase shifts and power allocation coefficients\nThe phase shifts of the SIM are randomly initialized and the power allocation coefficients are obtained using the water-filling algorithm. In order to avoid the gradient ascent method falling into a local optimal solution, we first generate multiple sets of phase shifts and then select the SIM configuration resulting in the maximum value of for initialization[7 ###reference_b7###].\nStep 2: Calculate the partial derivatives\nFor , the gradient of the objective function with respect to the phase shift of the -th meta-atom on the -th layer of the SIM is calculated by\nIn order to simplify the expression, we firstly define and write as . Hence, the two terms of (9 ###reference_###) can be explicitly expressed as\nwhere is obtained by\nBesides, and denote the -th row of and the -th column of respectively. And and are defined as\nNext, the gradient of the objective function with respect to the power allocation coefficients at the BS is calculated by\nStep 3: Normalize the partial derivatives\nTo reduce the risk of oscillation during optimization, we normalize the calculated partial derivatives as follow[7 ###reference_b7###]\nwhere and represent, respectively, the maximum value of the partial derivative with respect to the phase shifts of the SIM and the maximum value of the partial derivative with respect to the power allocation coefficients.\nStep 4: Update the phase shifts and power allocation coefficients\nThen, we can update the phase shifts of the SIM and the power allocation coefficients by\nwhere represents the Armijo step size, which is determined by utilizing the backtracking line search during each iteration [14 ###reference_b14###]. In order to satisfy (7 ###reference_###h) and (7 ###reference_###i), we first compensate the power of all UEs to make sure that the minimum value is larger than zero in every update. Then we perform normalization by taking\n.\n###figure_2### ###figure_3### By repeating Steps 2-4, we obtain the phase shifts of the SIM and power allocation coefficients as the objective function value converges. For clarity, we summarize the major steps of the proposed gradient ascent method in Algorithm 1."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4",
|
| 61 |
+
"parent_section_id": null,
|
| 62 |
+
"section_name": "IV Numerical Results",
|
| 63 |
+
"text": "In this section, we provide numerical results for characterizing the performance of the proposed SIM-aided ISAC system."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.1",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "IV-A Simulation Setups",
|
| 69 |
+
"text": "In the SIM-aided ISAC system shown in Fig. 1, choosing the appropriate hardware parameters for SIM is essential. For instance, having too many meta-atoms and layers can increase the computational complexity of solving the problem, whereas having too few may result in insufficient performance[7 ###reference_b7###, 13 ###reference_b13###]. Thus, we set . The total thickness of the SIM is , so the distance between adjacent metasurface layers is . The spacing between meta-atoms is .\nTo make sure a fair comparison, we set , where is the normalized beampattern gain and is the scale factor. The beampattern gain is evaluated with respect to the corresponding omnidirectional beampattern gain. The coordinates of the users are , and the channel between the SIM and UE is described by (4 ###reference_###), where we have . In addition, we consider a system that operates at a carrier frequency of 28 GHz. The target of interest is located at . The penalty factor is set to 2."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.2",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "IV-B Performance Evaluation of the Proposed Algorithm",
|
| 75 |
+
"text": "Fig. 2 verifies the convergence performance of the proposed gradient ascent algorithm for , from which we can see that the objective function reaches convergence within 10 iterations under different setups. Moreover, a larger number of metasurface layers results in a higher SE, thanks to the array gain provided by a large metasurface aperture.\nIn Fig. 3, we compare the SE under different beampattern gain thresholds . Observe from Fig. 3 that the SE gradually decreases as increases. This is due to the fact that as increases, the increased power allocation for sensing tasks results in reduced power for UEs and constraints on waveform design. Besides, in comparison to a single-layer configuration, a 7-layer SIM exhibits a remarkable 33% SE gain, as the multi-layer architecture of the SIM provides more powerful computing capability to mitigate inter-user interference in the wave domain. Nevertheless, when the number of metasurface layers exceeds a certain value, overly dense SIM layers may cause performance degradation. This is because the SIM\u2019s transfer function in (3) highly relies on an appropriate inter-layer propagation matrix, while a small layer spacing would result in the transmission matrix becoming diagonal. In practical ISAC systems, one should take into account the fundamental tradeoffs to determine the best SIM hardware parameters first and then configure its phase shifts for realizing desired computing functionality in the wave domain.\n###table_1### ###figure_4### ###figure_5### (a)\n(b)\n###table_2### ###figure_6### ###figure_7### (a)\n(b)\nFig. 4 compares the beampattern generated by the SIM in the ISAC scenario and communication-only system, respectively.\nCompared to the communication-only system, the proposed SIM-based transmitting array is capable of generating a desired beam in the target direction by leveraging the communication signals with waveforms known by the local sensing receiver. To illustrate, we also plot horizontal and vertical profiles of SIM-generated 3D beampattern in Figs. 5(a) and (b), respectively. Note that under both setups the beampattern gain in the desired direction satisfies the constraint condition, which demonstrates the effectiveness of the dual-functional precoding in the wave domain."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "5",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "Conclusions",
|
| 81 |
+
"text": "In this paper, a SIM-aided ISAC system was proposed, where all antennas are shared for communication and radar sensing. In contrast to conventional digital beamforming, a SIM was utilized to perform downlink beamforming in the wave domain. We formulated an optimization problem to maximize the SE of the communication users and ensure the desired gain at the sensing target. And extensive simulation results corroborated that the proposed system is capable of serving multiple UEs with satisfactory SE, while ensuring the desired beampattern gain. Besides, our simulation results also demonstrated that an appropriate increase in the number of SIM layers can effectively mitigate the inter-user interference automatically as multiple data streams are transmitted through the SIM. In subsequent work, we will focus on the SIM-aided ISAC system, taking into account the processing of echo signals and utilizing SIM to achieve wave-domain signal processing in a range of sensing functions such as detection, localization, and tracking."
|
| 82 |
+
}
|
| 83 |
+
],
|
| 84 |
+
"appendix": [],
|
| 85 |
+
"tables": {},
|
| 86 |
+
"image_paths": {
|
| 87 |
+
"1": {
|
| 88 |
+
"figure_path": "2408.10043v1_figure_1.png",
|
| 89 |
+
"caption": "Figure 1: A SIM-aided ISAC system.",
|
| 90 |
+
"url": "http://arxiv.org/html/2408.10043v1/x1.png"
|
| 91 |
+
},
|
| 92 |
+
"2": {
|
| 93 |
+
"figure_path": "2408.10043v1_figure_2.png",
|
| 94 |
+
"caption": "Figure 2: The convergence curves of the proposed gradient ascent algorithm (M=100,\u0393=8\u2062dBiformulae-sequence\ud835\udc40100\u03938dBiM=100,\\Gamma=8~{}\\text{dBi}italic_M = 100 , roman_\u0393 = 8 dBi).\n",
|
| 95 |
+
"url": "http://arxiv.org/html/2408.10043v1/x2.png"
|
| 96 |
+
},
|
| 97 |
+
"3": {
|
| 98 |
+
"figure_path": "2408.10043v1_figure_3.png",
|
| 99 |
+
"caption": "Figure 3: The SE versus the number of metasurface layers (M=100\ud835\udc40100M=100italic_M = 100).\n",
|
| 100 |
+
"url": "http://arxiv.org/html/2408.10043v1/x3.png"
|
| 101 |
+
},
|
| 102 |
+
"4(a)": {
|
| 103 |
+
"figure_path": "2408.10043v1_figure_4(a).png",
|
| 104 |
+
"caption": "Figure 4: Beampattern comparison for M=100,Q=7,K=4,\u0393=8\u2062dBiformulae-sequence\ud835\udc40100formulae-sequence\ud835\udc447formulae-sequence\ud835\udc3e4\u03938dBiM=100,Q=7,K=4,\\Gamma=8~{}\\textrm{dBi}italic_M = 100 , italic_Q = 7 , italic_K = 4 , roman_\u0393 = 8 dBi. (a) ISAC; (b) communication-only system.",
|
| 105 |
+
"url": "http://arxiv.org/html/2408.10043v1/x4.png"
|
| 106 |
+
},
|
| 107 |
+
"4(b)": {
|
| 108 |
+
"figure_path": "2408.10043v1_figure_4(b).png",
|
| 109 |
+
"caption": "Figure 4: Beampattern comparison for M=100,Q=7,K=4,\u0393=8\u2062dBiformulae-sequence\ud835\udc40100formulae-sequence\ud835\udc447formulae-sequence\ud835\udc3e4\u03938dBiM=100,Q=7,K=4,\\Gamma=8~{}\\textrm{dBi}italic_M = 100 , italic_Q = 7 , italic_K = 4 , roman_\u0393 = 8 dBi. (a) ISAC; (b) communication-only system.",
|
| 110 |
+
"url": "http://arxiv.org/html/2408.10043v1/x5.png"
|
| 111 |
+
},
|
| 112 |
+
"5(a)": {
|
| 113 |
+
"figure_path": "2408.10043v1_figure_5(a).png",
|
| 114 |
+
"caption": "Figure 5: Profiles of Fig. 4. (a) Horizontal beampattern; (b) Vertical beampattern.",
|
| 115 |
+
"url": "http://arxiv.org/html/2408.10043v1/x6.png"
|
| 116 |
+
},
|
| 117 |
+
"5(b)": {
|
| 118 |
+
"figure_path": "2408.10043v1_figure_5(b).png",
|
| 119 |
+
"caption": "Figure 5: Profiles of Fig. 4. (a) Horizontal beampattern; (b) Vertical beampattern.",
|
| 120 |
+
"url": "http://arxiv.org/html/2408.10043v1/x7.png"
|
| 121 |
+
}
|
| 122 |
+
},
|
| 123 |
+
"validation": true,
|
| 124 |
+
"references": [],
|
| 125 |
+
"url": "http://arxiv.org/html/2408.10043v1"
|
| 126 |
+
}
|
20240819/2408.10086v1.json
ADDED
|
@@ -0,0 +1,462 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "ARMADA: Attribute-Based Multimodal Data Augmentation",
|
| 3 |
+
"abstract": "In Multimodal Language Models (MLMs), the cost of manually annotating high-quality image-text pair data for fine-tuning and alignment is extremely high.\nWhile existing multimodal data augmentation frameworks propose ways to augment image-text pairs, they either suffer from semantic inconsistency between texts and images, or generate unrealistic images, causing knowledge gap with real world examples.\nTo address these issues, we propose Attribute-based Multimodal Data Augmentation (ARMADA), a novel multimodal data augmentation method via knowledge-guided manipulation of visual attributes of the mentioned entities. Specifically, we extract entities and their visual attributes from the original text data, then search for alternative values for the visual attributes under the guidance of knowledge bases (KBs) and large language models (LLMs). We then utilize an image-editing model to edit the images with the extracted attributes.\nARMADA is a novel multimodal data generation framework that: (i) extracts knowledge-grounded attributes from symbolic KBs for semantically consistent yet distinctive image-text pair generation, (ii) generates visually similar images of disparate categories using neighboring entities in the KB hierarchy, and (iii) uses the commonsense knowledge of LLMs to modulate auxiliary visual attributes such as backgrounds for more robust representation of original entities.\nOur empirical results over four downstream tasks demonstrate the efficacy of our framework to produce high-quality data and enhance the model performance. This also highlights the need to leverage external knowledge proxies for enhanced interpretability and real-world grounding.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Multimodal Language Models (MLMs) exhibit remarkable abilities in comprehending and integrating various modalities, encompassing texts, images, and videos.\nRecently, many MLMs have been proposed by researchers in both academic and industrial communities (Li et al., 2020 ###reference_b17###; Radford et al., 2021 ###reference_b27###; Li et al., 2022a ###reference_b15###, b ###reference_b16###; Liu et al., 2023b ###reference_b19###; Dai et al., 2023 ###reference_b7###; Achiam et al., 2023 ###reference_b1###), demonstrating significant achievements across various downstream tasks, such as image-text retrieval (Radford et al., 2021 ###reference_b27###; Li et al., 2022a ###reference_b15###) and visual question answering (VQA) (Liu et al., 2023b ###reference_b19###, a ###reference_b18###; Dai et al., 2023 ###reference_b7###). Training MLMs for downstream tasks, which usually involves fine-tuning and alignment stages, requires substantial amounts of annotated data.\nHowever, collecting and annotating such datasets demand considerable human effort and are notorious for their expense and time-consuming nature. A common strategy to overcome this problem is leveraging data augmentation techniques, which automatically synthesize new data instances from existing datasets, relieving the need to rely on manually annotated datasets to train these models.\n###figure_1### Existing multimodal data augmentation methods, which require the perturbation of both the visual and textual modalities in tandem, can generally be classified into the following two groups: (i) latent space-based methods that perturb the latent representations of existing data instances (Liu et al., 2022 ###reference_b20###) via adversarially trained augmentation networks, and (ii) surface form-based methods (M\u00fcller and Hutter, 2021 ###reference_b23###; Hao et al., 2023 ###reference_b10###) that simply perturb superficial representations such as orientations/pixel-level mixture of images. Latent space-based methods such as LeMDA (Liu et al., 2022 ###reference_b20###) generate augmented multimodal latent features aligned with the training data distribution, but are inherently confined by their lack of interpretability and controllability. While surface form-based methods partly provide interpretable and controllable alternative, their simple augmentation schemes such as random solarization and pixel-level interpolation lead to semantic inconsistency. For instance, Figure 1 ###reference_### shows that random cropping or image interpolation cause semantic gaps between paired images and texts, leading to images far from realistic. Moreover, such perturbations cannot deal with variable entity categories that appear in a similar background, or same entities with variable physical attributes, since they disregard attribute-level details.\nOur work aims to address these issues by leveraging a rich bank of attributes from a hierarchical knowledge base for interpretable and controllable multimodal data augmentation that guarantees semantic consistency and knowledge-grounding of generated entities.\nIn this paper, we introduce a novel attribute-based, multimodal data augmentation framework, ARMADA, that extracts the entities and visual attributes, then modifies the visual attributes of entities in images by building an entity-attribute multimodal knowledge base (KB). 
We perform entity-related knowledge extraction through entity linking using Spacy Entity Linker on Wikidata KB to: (i) generate augmented images and texts that faithfully reflect knowledge-grounded, entity-related attributes, and (ii) exploit the neighboring entities, e.g., a Boston Terrier and French Bulldog in Figure 1 ###reference_###, for generating similar yet distinguished entity categories. Our work also leverages LLMs as an additional knowledge proxy, as they can generate alternatives for any textual attributes without related entities in the KB.\nWe then modify images based on revised texts by employing an off-the-shelf image editing model, InstructPix2Pix (Brooks et al., 2023 ###reference_b3###). Our framework produces semantically consistent, knowledge-grounded multimodal data instances. In-depth experiments across four different image-text downstream tasks against five different baselines demonstrate the significance of augmenting multimodal data instances guided by entity-related attribute knowledge.\nOur contributions can be summarized as follows:\nWe propose a multimodal data augmentation framework guided by entity-centric KBs to generate entities of the same type yet with differing attributes, or of similar yet disparate categories.\nThe proposed augmentation pipeline produces semantically consistent and knowledge-grounded multimodal data, addressing the limitations of previous multimodal data augmentation methods.\nOur empirical results demonstrate that our proposed data augmentation strategy leads to substantial gains in various image-text downstream tasks such as image-text retrieval, VQA, image captioning, and especially in fine-grained image classification tasks that rely on attribute-centric information."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Related Work",
|
| 15 |
+
"text": "External Knowledge Proxies.\nExternal symbolic knowledge bases (KBs) like Wikidata (Vrande\u010di\u0107 and Kr\u00f6tzsch, 2014 ###reference_b32###) and real-world knowledge proxies like large language models (LLMs) (Achiam et al., 2023 ###reference_b1###; Touvron et al., 2023 ###reference_b30###; Almazrouei et al., 2023 ###reference_b2###) contain ample amount of real-world, entity-centric knowledge.\nWhile symbolic KBs have frequently been used in various domains of natural language processing for augmentation (LUO et al., 2023 ###reference_b21###; Sun et al., 2023 ###reference_b28###; Pan et al., 2024 ###reference_b26###), the use of symbolic KBs in the multimodal domain is yet to be explored. LLMs, while they may suffer from hallucinatory outputs, contain rich world knowledge that enables them to generalize to attributes of various kinds. Our work reaps the benefits of the both worlds by exploiting the relational knowledge of KBs and generalization abilities of LLMs to perform knowledge-guided multimodal data augmentation.\nVision Language Models.\nVision Language Models (VLMs) have achieved new state-of-the-art performances across various downstream tasks such as image-to-text retrieval and visual question answering (VQA) (Radford et al., 2021 ###reference_b27###; Li et al., 2022a ###reference_b15###; Dai et al., 2023 ###reference_b7###; Liu et al., 2023b ###reference_b19###, a ###reference_b18###). CLIP (Radford et al., 2021 ###reference_b27###) is a widely used VLM for image-text retrieval and image classification.\nInstructBLIP (Dai et al., 2023 ###reference_b7###) and LLaVA (Liu et al., 2023b ###reference_b19###) are instruction-tuned multimodal models that combine vision encoders and LLMs.\nThe major drawback of these models is that they require an extensive amount of image-text pair datasets to either pre-train or fine-tune the models. Such shortcomings call for the need of a new, robust augmentation method, which our work aims to offer.\nData Augmentation.\nExisting work on data augmentation mainly focuses on augmenting a single modality, e.g., text (Thakur et al., 2021 ###reference_b29###; Yoo et al., 2021 ###reference_b34###; Chen et al., 2023 ###reference_b5###) or image (Luo et al., 2023 ###reference_b22###; Trabucco et al., 2023 ###reference_b31###; M\u00fcller and Hutter, 2021 ###reference_b24###).\nMost recently in the multimodal domain, several augmentation methods have been proposed to augment multiple modalities at the same time. MixGen (Hao et al., 2023 ###reference_b10###) generates new data instances by interpolating images and concatenating their accompanying texts. As discussed in Figure 1 ###reference_###, one potential issue is the low quality of the generated data.\nLeMDA (Liu et al., 2022 ###reference_b20###), another augmentation method that jointly augments multimodal data in the feature space, is limited in terms of interpretability and controllability since the generation occurs in latent space.\nBiAug (Wu et al., 2023 ###reference_b33###) augments multimodal data in a similar manner as our approach by decoupling entities and their attributes. However, BiAug heavily relies on LLMs to generate the attributes, which are susceptible to hallucinatory outputs. Our proposed approach, in contrast, leverages entity-related attributes from knowledge base and delegates entity independent perturbations to LLMs."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Our Approach",
|
| 21 |
+
"text": "###figure_2### Suppose we have a set of image-text pairs as the training dataset.\n is a task-dependent text that is paired with its corresponding image, .\nFor example, can be the label of image in image classification task, a caption that describes in image-text retrieval task, or a question-answer pair if the image appears in a VQA task.\nGiven that the training dataset with gold-standard annotations is usually too small to train the vision language model sufficiently well, we aim to augment the original training dataset and generate additional image-text pairs .\nThe augmented dataset can be used in conjunction with the original dataset to train the VLMs and further improve their performance."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Extracting Entities and Visual Attributes from Text",
|
| 27 |
+
"text": "The primary goal of our proposed data augmentation framework is to generate new images by modifying the value of visual attributes of the mentioned entities.\nFor example, as shown in Figure 2 ###reference_###, our data augmentation method changes the color (visual attribute) of a linckia laevigata (entity) from blue (attribute value) to orange (attribute value). The first step of text modification is to identify the mentioned entities and visual attributes of mentioned entities within a given piece of text.\nTo this end, we use large language models (LLMs) to extract entities, visual attributes and attribute values given an input text, , as they demonstrate exceptional capabilities in text comprehension and generation. Given an original image-text pair , we input the text into an LLM along with the prompt \u201cExtract the mentioned objects, their visual attributes, and values of visual attributes from the sentence: \u201d.\nFor example, as illustrated in Figure 2 ###reference_###, we can extract from the sentence \u201cA blue linckia laevigata rests on the coral reef\u201d that the entity is linckia laevigata, the visual attributes are color and location, and the attribute values are blue and coral reef, respectively.\nThe entities, visual attributes and their values serve as candidates for subsequent visual attribute value substitution."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "KB-based Visual Attribute Substitution",
|
| 33 |
+
"text": "Knowledge Base Construction. After identifying visual attributes mentioned in text we determine potential substitutions for their attribute values. We leverage attributes from entity-centric KBs to provide accurate and reliable knowledge for substituting visual attribute values.\nWe first parse the information from Wikidata and Wikipedia, and construct an attribute-level KB consisting of entities and their attributes, which consists of two steps:\n(1) Graph topology: We collect entities from Wikidata and use a node in the KB to represent an entity.\nEach node has an outgoing edge to its parent category node.\nFor instance, as illustrated in Figure 3 ###reference_###, both linckia laevigata and linckia guildingi belong to the parent category valvatida, thus resulting in two directed edges from these nodes to valvatida.\n(2) Node attributes: The visual information for each node in the KB is derived from its corresponding Wikipedia articles. We collect the textual content of each Wikipedia page, then employ LLMs to extract all visual attributes and their possible values described within the article.\nFor instance, the entity linckia laevigata may have color of blue and dark blue, with the number of arms starting from four.\n###figure_3### After building the KB, we link each entity extracted from to a node in the KB using the Spacy Entity Linker (Honnibal et al., 2020 ###reference_b12###).\nTo generate a new augmented data sample, we use the following two attribute value substitution methods.\nAttribute Substitution within Single Entity. A single entity may possess multiple plausible attributes, which are identifiable through entity linking to KB. Some of these extracted entities with specific attributes may occur less frequently in the original training dataset than those with more frequently occurring attributes. Therefore, we aim to augment the data to increase the coverage of such long-tail entity instances, so that the model is better fine-tuned to recognize these rare cases well. To elaborate, we randomly choose a visual attribute connected to the entity node and then sample an attribute value to substitute the current attribute value of .\nIn this case, the entity stays the same while only its one attribute value is changed. For example, blue linckia laevigata dark blue linckia laevigata as illustrated in Figure 2 ###reference_###.\nAttribute Substitution across Sibling Entities. 
In addition to substituting attributes within a single entity, we notice that there are many entities in KBs that belong to the same parent category and share many visual attributes, e.g., the linckia laevigata and henricia leviuscula in Figure 2 ###reference_###.\nThis inspires us to substitute attributes across these sibling entities to introduce similar but different concepts as augmented training data.\nIn this way, the model will contrastively learn from these confusing entities, thereby increasing its robustness to visually similar but different entity concepts.\nSpecifically, we consider changing the entity node to its sibling entity node that shares the most visual attributes with .\nFor example, in Figure 3 ###reference_###, linckia laevigata and henricia leviuscula have many attributes in common, so it is feasible to change the original entity to the new entity.\nWe therefore substitute the entity linckia laevigata with henricia leviuscula, and then change its color to a plausible value for henricia leviuscula (e.g., orange).\nThe resulting substitution is therefore blue linckia laevigata \u2192 orange henricia leviuscula."
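A toy version of the attribute-level KB and the two substitution modes is sketched below; the dictionary layout and the sibling-scoring rule (most shared attribute names) are simplified assumptions.

```python
import random

# Toy attribute-level KB: each entity stores its parent category and the
# plausible values of its visual attributes (parsed from Wikipedia).
KB = {
    "linckia laevigata": {"parent": "valvatida",
                          "attrs": {"color": ["blue", "dark blue"]}},
    "henricia leviuscula": {"parent": "valvatida",
                            "attrs": {"color": ["orange", "red-orange"]}},
}

def substitute(entity, attr, value, cross_sibling=False):
    # Cross-sibling mode: swap in the sibling sharing the most attributes.
    if cross_sibling:
        parent = KB[entity]["parent"]
        siblings = [e for e, v in KB.items()
                    if v["parent"] == parent and e != entity]
        entity = max(siblings, key=lambda e: len(
            KB[e]["attrs"].keys() & KB[entity]["attrs"].keys()))
    # Within-entity mode (and the final step of both): resample one value.
    choices = [v for v in KB[entity]["attrs"][attr] if v != value]
    return entity, attr, random.choice(choices)

# e.g. substitute("linckia laevigata", "color", "blue")        # dark blue
#      substitute("linckia laevigata", "color", "blue", True)  # sibling swap
```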
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.3",
|
| 37 |
+
"parent_section_id": "3",
|
| 38 |
+
"section_name": "LLM-based Visual Attribute Substitution",
|
| 39 |
+
"text": "In some cases, the extracted entity or visual attribute is too general and cannot be linked to any node in the KB (e.g., coral reef serving as a background in Figure 2 ###reference_###).\nTherefore, in addition to KBs, we also use LLMs to obtain new values for auxiliary visual attributes such as background, as they are broadly trained on a large amount of data and thus have acquired commonsense knowledge to provide alternative attribute values for such cases.\nFor example, in Figure 2 ###reference_###, after we extract that the location is coral reef, we use the prompt \u201cWhat are other possible values for the <location> attribute in this sentence?\u201d to generate new location value substitutions, such as sandy bottom and rocky shores.\nIt is worth noting that LLMs may not consistently produce valid substitute attribute values, as they may lack adequate knowledge regarding specialized fields or long-tail concepts. This deficiency may lead to LLMs generating inaccurate responses, i.e., hallucination. For instance, when prompt the LLMs for all possible colors of linckia laevigata, LLMs may provide incorrect answers such as \u201corange\u201d and \u201cyellow\u201d, which are implausible colors for linckia laevigata. Therefore, we rely on KBs to extract accurate, knowledge-grounded attributes for substitution.\nIt is worth noting that the models we utilize in each component may not be perfect, which can affect the performance of the proposed approach. Our experimental results in Section 4.6 ###reference_### indicate that the error rates of the information extraction, entity linking, and visual attribute substitutions are relatively low, which do not significantly impact the quality of the generated data."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.4",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "Image Editing",
|
| 45 |
+
"text": "After modifying an image-text pair to with a new text , we edit the image according to . We employ an image editing model InstructPix2Pix (Brooks et al., 2023 ###reference_b3###), which can take as input an image and instruction on how to modify the image, and output the modified image following the instruction.\nThe instruction here is \u201cChange the [attribute] of the [entity] to [value]\u201d, where [entity] and [attribute] are the mentioned entity and selected attribute type, respectively, and [value] is the new attribute value output by the KB or LLM.\nAs illustrated in Figure 2 ###reference_###, starting with the original image on the left, we generate three new images on the right using InstructPix2Pix with different instructions.\nThe first image keeps the entity linckia laevigata unchanged while changing its color to dark blue, whereas the second image changes the color to orange, updating the entity category to henricia leviuscula and its corresponding text description accordingly.\nThe third one is the result of changing the attribute of location to sandy beach by querying LLMs; this leaves the central entity of the image unperturbed, providing a robust way to leverage LLMs only for attributes that are not entity-related."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.5",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "Augmented Data Selection",
|
| 51 |
+
"text": "Our method transforms an image-text pair to a modified image-text pair .\nHowever, not all modified image-text pairs are suitable as augmented data;\nsome image being too similar to their original counterpart , thereby providing minimal new signal for subsequent model training.\nConversely, other generated image diverging too much from their original counterpart may significantly drift the image away from the original data distribution and mislead the model training.\nTo determine the validity of the augmented data, we calculate the similarity between a generated image and its original image using the Fr\u00e9chet Inception Distance (FID) score (Heusel et al., 2018 ###reference_b11###). FID calculates the Fr\u00e9chet distance between feature vectors of the original and generated images, which aligns closely with human judgment and is frequently utilized to assess the quality of generated data.\nIdeally, we aim to empirically maintain the similarity score within a specific range to ensure that exhibit a reasonable amount of difference from as indicated in the ablation study.\nThe experimental results on selecting the similarity range is presented in Appendix A.3 ###reference_###."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "Experiments",
|
| 57 |
+
"text": "To assess the effectiveness of data augmentation methods, we select four evaluation tasks: image classification, visual question answering, image-text retrieval, and image captioning."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.1",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "Foundation Models and Baseline Methods",
|
| 63 |
+
"text": "We use CLIP (Radford et al., 2021 ###reference_b27###) and LLaVA-1.5 (7B) (Liu et al., 2023a ###reference_b18###) model as the foundation models in this work. CLIP is a multimodal model that uses contrastive learning to jointly align the visual and textual representations.\nLLaVA-1.5 is an open-source, auto-regressive multimodal vision-language model (VLM) trained by fine-tuning Vicuna-v1.5 (Chiang et al., 2023 ###reference_b6###) on GPT-4-generated multimodal instruction-following data. Given an image input and text instruction, LLaVA-1.5 generates output texts based on its reasoning upon the two modalities.\nWe use GPT-4 OpenAI (2023 ###reference_b25###) as the LLMs in each component.\nWe compare our proposed method against five different baseline methods to demonstrate its effectiveness (we do not include BiAug (Wu et al., 2023 ###reference_b33###) since the code has not been released yet):\n(1) Zero-shot: Models are evaluated without fine-tuning on any data. This setting is established to examine the initial ability of the models on all four downstream tasks.\n(2) NoAug: Only the original training data is used to fine-tune the models without any augmented data.\n(3) NaiveAug: Two naive augmentation methods are applied to texts and images independently as follows.\nWe use AEDA (Karimi et al., 2021 ###reference_b14###) to randomly insert punctuation marks into original text, and we use TrivialAugment (M\u00fcller and Hutter, 2021 ###reference_b24###) to randomly apply center cropping, rotation, or invert, to images.\n(4) MixGen (Hao et al., 2023 ###reference_b10###): Generates new data instance by interpolating images on the pixel-level and concatenating texts. This is state-of-the-art augmentation method. Specifically, given two image-text pairs and , a new image-text pair is generated by and , where is a hyper-parameter.\n(5) LeMDA (Liu et al., 2022 ###reference_b20###): Generates augmented data in the latent feature space. We use CLIP to encode the original training data into embeddings, then feed them to LeMDA to generate new latent embeddings; these embeddings are used as augmented data to fine-tune an MLP module in the image classification task. Note that LeMDA cannot be used for LLaVA-1.5 and cannot be used in tasks other than image classification."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.2",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "Image Classification",
|
| 69 |
+
"text": "Dataset.\nWe use iNaturalist 2021 (Horn et al., 2018 ###reference_b13###) as the dataset for image classification.\nThe iNaturalist dataset consists of large scale species of plants and animals in the natural world.\nIt contains 10,000 species with a training set of 2.7M images.\nTo better mimic the scenario of annotated data scarcity, we sample from a mini dataset with all 246 species of Mammalia.\nEach class has 30/15/15 images for training/validation/inference.\nExperimental Setup.\nFor CLIP, we transform the class labels in iNaturalist dataset into natural language descriptions: \u201c[label]\u201d \u201ca photo of [label]\u201d, following caption formats in CLIP (Radford et al., 2021 ###reference_b27###).\nCLIP takes as input an image and all class labels, then outputs logit scores for these classes.\nThe label with the highest logit score is taken as the predicted result of CLIP model.\nFor LLaVA-1.5, we evaluate its performance by asking the model what is included in the image, and then verify whether the true labels are presented in the generated responses.\nThe evaluation prompt is: \u201cWhat is the name of the mammal that appears in this image? For example, if it\u2019s a picture of a bengal tiger, output a fine-grained label \u2018Bengal Tiger\u2019 or use its binomial nomenclature \u2018Panthera tigris tigris\u2019. Provide your answer:\u201d.\nThis allows us to assess model\u2019s classification ability based on the provided images.\nResults. The results of Precision, Recall, and for image classification task are presented in the left part of Table 1 ###reference_###.\nAs shown from the zero-shot results, the pretrained foundation models have poor performance on fine-grained concept recognition, with scores of and on CLIP and LLaVA, respectively.\nAfter fine-tuning with the original training data, both models have a much better performance, with a and absolute gain on scores.\nWhile both NaiveAug and LeMDA demonstrate some improvement in model performance, our method achieves the best results among all existing methods.\nIt is worth noting that the score of MixGen is worse than NoAug.\nThis is because the interpolation of images distorts the visual attribute of the fine-grained concepts, thereby adversely affects model training.\nConversely, our method is able to generate new images by modifying the visual attributes of entities.\nThis facilitates a more comprehensive learning of fine-grained concepts by foundation models."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.3",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "Visual Question Answering",
|
| 75 |
+
"text": "Datasets.\nVisual Question Answering (VQA) v2.0 (Goyal et al., 2017 ###reference_b9###) dataset consists of open-ended questions to images.\nThese questions require understanding vision, language, and commonsense knowledge to provide answers.\nVQA-2.0 has 265,015 images and each image has at least 3 related questions.\nExperimental Setup.\nWe consider the VQA task as an answer generation task.\nWe utilize LLaVA as the foundation model.\nGiven the open-ended nature of the task, we let the model generate free-form answers without any constraints.\nThen we compute the textual similarity between the output of LLaVA and the true answer.\nResults.\nThe results of VQA task are shown in the right part of Table 1 ###reference_###.\nWe evaluate the performance on the test-dev dataset via textual similarities using Universal Sentence Encoder (USE) (Cer et al., 2018 ###reference_b4###) and BERTScore (Zhang et al., 2020 ###reference_b36###)..\nIt is clear that, compared with Zero-shot, the performance of LLaVA improves greatly after fine-tuning.\nThis is probably because the ground truth answers to the questions are typically simple and short, which makes the task relatively easier.\nAs demonstrated in the table, the textual similarity achieved by our method surpasses the best baseline method MixGen by on USE and on BERTScore."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.4",
|
| 79 |
+
"parent_section_id": "4",
|
| 80 |
+
"section_name": "Image-Text Retrieval",
|
| 81 |
+
"text": "Dataset.\nFlickr30k (Young et al., 2014 ###reference_b35###) contains 31,000 images, each with 5 human-annotated referenced sentences that describe the image.\nThis dataset is widely used in image-text retrieval task.\nSimilar to iNaturalist, we sample 5k images from the training set and use the entire 1k test set for evaluation.\nExperimental Setup.\nImage-text retrieval includes two subtasks: text-to-image and image-to-text retrieval. We use CLIP to calculate the embedding of the given image, as well as the embeddings of all candidate captions in the test set.\nWe compare the cosine similarity between the image embedding and each text embedding, and output top captions with the highest similarity scores as the retrieved results.\nWe follow existing work and use as evaluation metric.\nResults.\nThe results of image-text retrieval are shown in the left part of Table 2 ###reference_###.\nThe zero-shot performance of the pretrained CLIP is already very good on both image retrieval and text retrieval, because it is originally trained using the contrastive loss between image and text embeddings.\nAfter fine-tuning, the performance on both subtasks can be further improved in most cases.\nNote that the improvement of our method over baseline methods in this task appears less significant compared to other tasks.\nThis is primarily due to the already high zero-shot performance of CLIP, leaving limited room for further improvement."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "4.5",
|
| 85 |
+
"parent_section_id": "4",
|
| 86 |
+
"section_name": "Image Captioning",
|
| 87 |
+
"text": "Experimental Setup.\nImage captioning task aims to generate natural language descriptions of an image.\nWe use LLaVA-1.5 as the foundation model, and Flickr30k as the evaluation dataset as introduced in Section 4.4 ###reference_###. Specifically, given an image as input, we use the prompt \u201cDescribe this image using one simple sentence\u201d to ask LLaVA-1.5 to generate a caption.\nTo evaluate the quality of generated captions, we compare the textual similarity between the generated caption and the gold-standard annotation for a given image using USE and BERTScore.\nSince there may be multiple gold-standard captions for an image, we calculate the similarity score of a generated caption with each gold-standard caption, and return the maximum as the final score for this generated caption.\nResults. The results of image captioning task are presented in the right part of Table 2 ###reference_###.\nOur method ARMADA achieves the best performance over all baseline methods.\nSpecifically, the performance gain of our method on USE score is over NoAug, and over the best baseline augmentation method MixGen.\nWe provide detailed case analysis of the generated captions by our method and by baseline methods in Appendix B ###reference_###."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "4.6",
|
| 91 |
+
"parent_section_id": "4",
|
| 92 |
+
"section_name": "Error Analysis",
|
| 93 |
+
"text": "We investigate the error rate of each component in the data augmentation process and how they affect our model. Specifically, we manually check the correctness of attribute extraction and the visual attribute substitution.\nIt turns out that the percentage of incorrect attributes that are extracted is quite low (4 / 113 = 3.5%). The percentage of inappropriate substitution by LLMs is also very low (1 / 73 = 2.7%). The visual attribute substitutions from KBs are template-based substitutions from possible attribute values, which will not incur any error aggregation issues."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "5",
|
| 97 |
+
"parent_section_id": null,
|
| 98 |
+
"section_name": "Conclusions and Future Work",
|
| 99 |
+
"text": "We propose a novel data augmentation method that utilizes KBs and LLMs to generate multimodal data. The proposed framework is able to generate semantically consistent data that solves the potential issues of the existing methods. Our method significantly improves the MLM\u2019 performance on various downstream tasks, without the need of high-cost annotated data. Experiment results also demonstrate the effectiveness of our proposed method compared to the baseline methods.\nIn the future, we aim to incorporate more modalities into our framework such as video and audio.\nWe also plan to rank visual attributes and select the most influential attributes for augmentation.\nMoreover, existing image editing tools our framework relies on do not perform consistently well. Designing a new visual attribute editing model to further enhance the quality of the augmented data is also a promising research direction."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "6",
|
| 103 |
+
"parent_section_id": null,
|
| 104 |
+
"section_name": "Limitations",
|
| 105 |
+
"text": "Our proposed method demonstrates the effectiveness only on image-text data. However, to enhance the practical utility of our method, it would be advantageous to expand our data augmentation method to include more modalities, such as video and audio. Furthermore, as discussed earlier, although the error rate in each component is low and will not affect the performance much, we still aim to incorporate better attribute extraction and visual attribute substitution models into the framework to further improve our method."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "7",
|
| 109 |
+
"parent_section_id": null,
|
| 110 |
+
"section_name": "Ethical Consideration",
|
| 111 |
+
"text": "We acknowledge that our word is aligned with the ACL Code of the Ethics Gotterbarn et al. (2018 ###reference_b8###) and will not raise ethical concerns.\nWe do not use sensitive datasets/models that may cause any potential issues."
|
| 112 |
+
}
|
| 113 |
+
],
|
| 114 |
+
"appendix": [
|
| 115 |
+
{
|
| 116 |
+
"section_id": "Appendix 1",
|
| 117 |
+
"parent_section_id": null,
|
| 118 |
+
"section_name": "Appendix A Ablation Study",
|
| 119 |
+
"text": "To investigate the impact of the amount of the augmented data, we conduct experiments by varying the size of the augmented data relative to the size of the original training data, ranging from 0% to 300%.\nThe results in Table 3 ###reference_### show a decline in model performance when the augmented data size significantly surpassed the original training data size (exceeding to ), potentially due to excessive noise introduced by the augmented data. Our findings suggest that, the augmented data size should approximate that of the original training data for best performance.\nTo assess the importance of utilizing KBs, we conduct additional experiments on the image classification task by solely relying on LLMs to do attribute value substitution.\nFollowing the aforementioned experimental setup, we fine-tune a CLIP model on the iNaturalist dataset.\nThe score exhibits a decline (from to ) without using KBs.\nThis suggests that though LLMs are able to provide answers for attribute value substitution, the hallucination issue on fine-grained or rare entities can still introduce noise to the training data, thereby impacting the model performance.\nWe conduct experiments to investigate how the similarity between augmented and original data impact the model performance.\nIn the image classification task, we split the augmented dataset into four groups of equal size according to the similarity of the edited image with its original image.\nThen we use each group as the augmented data to train CLIP.\nThe scores of the four groups are , , , and , respectively, from most-similar to most-dissimilar.\nThe results support our claim in Section 3.5 ###reference_### that maintaining similarity scores within a reasonable range achieves the best performance.\n###figure_4###"
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"section_id": "Appendix 2",
|
| 123 |
+
"parent_section_id": null,
|
| 124 |
+
"section_name": "Appendix B Case Analysis on the Results of Image Captioning",
|
| 125 |
+
"text": "We perform a case analysis to illustrate the effectiveness of our method.\nIn Figure 4 ###reference_###, we present two image-caption pairs from the Flickr30k dataset, including both the human-annotated captions and the captions generated by Zero-shot, NoAug, and ARMADA (using LLaVA as the foundation model).\nFor the image on the left, our method is able to identify the fine-grained concept karate whereas the zero-shot and NoAug methods generate a more generalized concept martial arts.\nFor the image on the right, the caption generated by our method provides a more detailed and accurate description of the hat, which specifies its knit pattern and the beer logo pattern.\nThese examples suggest that LLaVA can effectively learn the visual attributes and identifies the fine-grained concepts through our method."
|
| 126 |
+
},
|
| 127 |
+
{
|
| 128 |
+
"section_id": "Appendix 3",
|
| 129 |
+
"parent_section_id": null,
|
| 130 |
+
"section_name": "Appendix C Ethical Consideration",
|
| 131 |
+
"text": "We acknowledge that our word is aligned with the ACL Code of the Ethics Gotterbarn et al. (2018 ###reference_b8###) and will not raise ethical concerns.\nWe do not use sensitive datasets/models that may cause any potential issues."
|
| 132 |
+
}
|
| 133 |
+
],
|
| 134 |
+
"tables": {
|
| 135 |
+
"1": {
|
| 136 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T1.45\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T1.45.46.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_tt\" id=\"S4.T1.45.46.1.1\" rowspan=\"2\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.45.46.1.1.1\">Method</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" colspan=\"3\" id=\"S4.T1.45.46.1.2\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.45.46.1.2.1\">Image Classification (CLIP)</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_tt\" colspan=\"2\" id=\"S4.T1.45.46.1.3\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.45.46.1.3.1\">Image Classification (LLaVA)</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" colspan=\"2\" id=\"S4.T1.45.46.1.4\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.45.46.1.4.1\">VQA</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.7.7\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.1\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.2.2.2\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T1.3.3.3\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.4.4\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T1.5.5.5\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.6.6.6\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.7.7.7\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.14.14\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"S4.T1.14.14.8\" style=\"padding-left:8.0pt;padding-right:8.0pt;\">Zero-shot</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.8.8.1\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.9.9.2\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.10.10.3\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.11.11.4\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S4.T1.12.12.5\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.13.13.6\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.14.14.7\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.21.21\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S4.T1.21.21.8\" style=\"padding-left:8.0pt;padding-right:8.0pt;\">NoAug</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.15.15.1\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td 
class=\"ltx_td ltx_align_center\" id=\"S4.T1.16.16.2\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T1.17.17.3\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.18.18.4\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T1.19.19.5\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.20.20.6\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.21.21.7\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.28.28\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S4.T1.28.28.8\" style=\"padding-left:8.0pt;padding-right:8.0pt;\">NaiveAug</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.22.22.1\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.23.23.2\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T1.24.24.3\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.25.25.4\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T1.26.26.5\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.27.27.6\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.28.28.7\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.35.35\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S4.T1.35.35.8\" style=\"padding-left:8.0pt;padding-right:8.0pt;\">MixGen</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.29.29.1\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.30.30.2\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T1.31.31.3\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.32.32.4\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T1.33.33.5\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.34.34.6\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.35.35.7\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.38.38\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S4.T1.38.38.4\" style=\"padding-left:8.0pt;padding-right:8.0pt;\">LeMDA</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.36.36.1\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.37.37.2\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T1.38.38.3\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.38.38.5\" style=\"padding-left:8.0pt;padding-right:8.0pt;\">-</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T1.38.38.6\" 
style=\"padding-left:8.0pt;padding-right:8.0pt;\">-</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.38.38.7\" style=\"padding-left:8.0pt;padding-right:8.0pt;\">-</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.38.38.8\" style=\"padding-left:8.0pt;padding-right:8.0pt;\">-</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.45.45\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_r ltx_border_t\" id=\"S4.T1.45.45.8\" style=\"padding-left:8.0pt;padding-right:8.0pt;\">ARMADA</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T1.39.39.1\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T1.39.39.1.1\">0.391</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T1.40.40.2\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T1.40.40.2.1\">0.386</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r ltx_border_t\" id=\"S4.T1.41.41.3\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T1.41.41.3.1\">0.389</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T1.42.42.4\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T1.42.42.4.1\">0.588</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_rr ltx_border_t\" id=\"S4.T1.43.43.5\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T1.43.43.5.1\">0.621</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T1.44.44.6\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T1.44.44.6.1\">0.835</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T1.45.45.7\" style=\"padding-left:8.0pt;padding-right:8.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T1.45.45.7.1\">0.975</span></td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Results of Precision, Recall, and F<sub class=\"ltx_sub\" id=\"S4.T1.49.1\">1</sub> on iNaturalist dataset for image classification (left part) and results of textual similarity on VQA v2.0 dataset for visual question answering (right part). The foundation model is LLaVA-1.5 for VQA.</figcaption>\n</figure>",
|
| 137 |
+
"capture": "Table 1: Results of Precision, Recall, and F1 on iNaturalist dataset for image classification (left part) and results of textual similarity on VQA v2.0 dataset for visual question answering (right part). The foundation model is LLaVA-1.5 for VQA."
|
| 138 |
+
},
|
| 139 |
+
"2": {
|
| 140 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T2.46\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T2.46.47.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_tt\" id=\"S4.T2.46.47.1.1\" rowspan=\"2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S4.T2.46.47.1.1.1\">Method</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" colspan=\"3\" id=\"S4.T2.46.47.1.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.46.47.1.2.1\">Image Retrieval</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_tt\" colspan=\"3\" id=\"S4.T2.46.47.1.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.46.47.1.3.1\">Text Retrieval</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" colspan=\"2\" id=\"S4.T2.46.47.1.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.46.47.1.4.1\">Image Captioning</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.6.6\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.2.2.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.3.3.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.4.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.5.5.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T2.6.6.6\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.6.6.7\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T2.6.6.7.1\">USE</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.6.6.8\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T2.6.6.8.1\">BERTScore</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.14.14\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"S4.T2.14.14.9\" style=\"padding-left:9.0pt;padding-right:9.0pt;\">Zero-shot</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.7.7.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.8.8.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.9.9.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.10.10.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.11.11.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S4.T2.12.12.6\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.13.7\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.14.14.8\" 
style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.22.22\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S4.T2.22.22.9\" style=\"padding-left:9.0pt;padding-right:9.0pt;\">NoAug</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.15.15.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.16.16.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.17.17.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.18.18.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.19.19.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T2.20.20.6\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.21.21.7\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.22.22.8\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.30.30\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S4.T2.30.30.9\" style=\"padding-left:9.0pt;padding-right:9.0pt;\">NaiveAug</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.23.23.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.24.24.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.25.25.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.26.26.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.27.27.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T2.28.28.6\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.29.29.7\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.30.30.8\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.38.38\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S4.T2.38.38.9\" style=\"padding-left:9.0pt;padding-right:9.0pt;\">MixGen</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.31.31.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.32.32.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.33.33.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.34.34.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.35.35.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T2.36.36.6\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.37.37.7\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.38.38.8\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr 
class=\"ltx_tr\" id=\"S4.T2.46.46\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_r ltx_border_t\" id=\"S4.T2.46.46.9\" style=\"padding-left:9.0pt;padding-right:9.0pt;\">ARMADA</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T2.39.39.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T2.39.39.1.1\">0.646</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T2.40.40.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T2.40.40.2.1\">0.797</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r ltx_border_t\" id=\"S4.T2.41.41.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T2.41.41.3.1\">0.847</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T2.42.42.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T2.42.42.4.1\">0.646</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T2.43.43.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T2.43.43.5.1\">0.811</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_rr ltx_border_t\" id=\"S4.T2.44.44.6\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T2.44.44.6.1\">0.872</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T2.45.45.7\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T2.45.45.7.1\">0.682</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.T2.46.46.8\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"S4.T2.46.46.8.1\">0.918</span></td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Results of Recall@K for image-text retrieval (left part) and results of textual similarity for image captioning (right part). We use Flickr30k dataset for both tasks. The foundation model is CLIP for image-text retrieval and LLaVA-1.5 for image captioning.</figcaption>\n</figure>",
|
| 141 |
+
"capture": "Table 2: Results of Recall@K for image-text retrieval (left part) and results of textual similarity for image captioning (right part). We use Flickr30k dataset for both tasks. The foundation model is CLIP for image-text retrieval and LLaVA-1.5 for image captioning."
|
| 142 |
+
},
|
| 143 |
+
"3": {
|
| 144 |
+
"table_html": "<figure class=\"ltx_table\" id=\"A1.T3\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"A1.T3.19\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"A1.T3.19.20.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_tt\" id=\"A1.T3.19.20.1.1\" rowspan=\"2\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"><span class=\"ltx_text\" id=\"A1.T3.19.20.1.1.1\">Dataset</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_tt\" id=\"A1.T3.19.20.1.2\" rowspan=\"2\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"><span class=\"ltx_text\" id=\"A1.T3.19.20.1.2.1\">Metric</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" colspan=\"4\" id=\"A1.T3.19.20.1.3\" style=\"padding-left:13.0pt;padding-right:13.0pt;\">Size of the augmented data</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A1.T3.4.4\">\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.1.1.1\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.2.2.2\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.3.3.3\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.4.4.4\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A1.T3.9.9\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"A1.T3.9.9.6\" style=\"padding-left:13.0pt;padding-right:13.0pt;\">iNaturalist</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"A1.T3.5.5.1\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.6.6.2\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.7.7.3\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"A1.T3.7.7.3.1\">0.389</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.8.8.4\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.9.9.5\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A1.T3.14.14\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"A1.T3.14.14.6\" style=\"padding-left:13.0pt;padding-right:13.0pt;\">Flickr30k</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"A1.T3.10.10.1\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></th>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.11.11.2\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.12.12.3\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"A1.T3.12.12.3.1\">0.682</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.13.13.4\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.14.14.5\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A1.T3.19.19\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_r\" id=\"A1.T3.19.19.6\" style=\"padding-left:13.0pt;padding-right:13.0pt;\">VQA v2.0</th>\n<th class=\"ltx_td 
ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_r\" id=\"A1.T3.15.15.1\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.16.16.2\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.17.17.3\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.18.18.4\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"><span class=\"ltx_text ltx_markedasmath ltx_font_bold\" id=\"A1.T3.18.18.4.1\">0.835</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.19.19.5\" style=\"padding-left:13.0pt;padding-right:13.0pt;\"></td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>The impact of the size of the generated data on the performance of multiple tasks.</figcaption>\n</figure>",
|
| 145 |
+
"capture": "Table 3: The impact of the size of the generated data on the performance of multiple tasks."
|
| 146 |
+
}
|
| 147 |
+
},
|
| 148 |
+
"image_paths": {
|
| 149 |
+
"1": {
|
| 150 |
+
"figure_path": "2408.10086v1_figure_1.png",
|
| 151 |
+
"caption": "Figure 1: Generated examples using two previous data augmentation methods and our approach. (a) is generated by TrivialAugment (M\u00fcller and Hutter, 2021), showing the altered images from randomly solarizing or cropping the dog and the fence out from the original image, demonstrating semantic inconsistency. (b) shows the output image from MixGen (Hao et al., 2023), demonstrating the unrealistic output from simple image interpolation and text concatenation.\n(c) shows the augmented data from our method ARMADA, which are semantically consistent.",
|
| 152 |
+
"url": "http://arxiv.org/html/2408.10086v1/x1.png"
|
| 153 |
+
},
|
| 154 |
+
"2": {
|
| 155 |
+
"figure_path": "2408.10086v1_figure_2.png",
|
| 156 |
+
"caption": "Figure 2: The overall framework of our data augmentation method. Given an image-text pair as input, we first extract entities and their corresponding visual attributes from text. If the object can be linked to an entity in our pre-defined attribute knowledge base, then we collect all possible attribute values from the information of the linked entity. If the object cannot be linked to the knowledge base, then we utilize Large Language Models (LLMs) to extract other possible values. After selecting which visual attribute to modify, we rewrite the original text and use an image editing model to generate new images based on the new text. Finally, we rank the augmented data and output data based on the similarity scores.",
|
| 157 |
+
"url": "http://arxiv.org/html/2408.10086v1/x2.png"
|
| 158 |
+
},
|
| 159 |
+
"3": {
|
| 160 |
+
"figure_path": "2408.10086v1_figure_3.png",
|
| 161 |
+
"caption": "Figure 3: An example from the our pre-defined attribute library. Each node represents an entity collected from Wikidata. An outgoing edge is connected from a node to its parent category. Each node has its visual attributes extracted from the Wikipedia articles.",
|
| 162 |
+
"url": "http://arxiv.org/html/2408.10086v1/x3.png"
|
| 163 |
+
},
|
| 164 |
+
"4": {
|
| 165 |
+
"figure_path": "2408.10086v1_figure_4.png",
|
| 166 |
+
"caption": "Figure 4: A case analysis that shows sample outputs on Flickr30k dataset for image captioning task. We select two images from the test set, the human-annotated captions, and the generated captions from each method. For the image on the left, our method is able to recognize the fine-grained concept karate. The image on the right demonstrates that the model is able to provide an accurate description of the hat, specifying its knit texture and beer logo pattern.",
|
| 167 |
+
"url": "http://arxiv.org/html/2408.10086v1/x4.png"
|
| 168 |
+
}
|
| 169 |
+
},
|
| 170 |
+
"validation": true,
|
| 171 |
+
"references": [
|
| 172 |
+
{
|
| 173 |
+
"1": {
|
| 174 |
+
"title": "Gpt-4 technical report.",
|
| 175 |
+
"author": "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023.",
|
| 176 |
+
"venue": "arXiv preprint arXiv:2303.08774.",
|
| 177 |
+
"url": null
|
| 178 |
+
}
|
| 179 |
+
},
|
| 180 |
+
{
|
| 181 |
+
"2": {
|
| 182 |
+
"title": "Falcon-40B: an open large language model with state-of-the-art performance.",
|
| 183 |
+
"author": "Ebtesam Almazrouei, Hamza Alobeidli, Abdulaziz Alshamsi, Alessandro Cappelli, Ruxandra Cojocaru, Merouane Debbah, Etienne Goffinet, Daniel Heslow, Julien Launay, Quentin Malartic, Badreddine Noune, Baptiste Pannier, and Guilherme Penedo. 2023.",
|
| 184 |
+
"venue": null,
|
| 185 |
+
"url": null
|
| 186 |
+
}
|
| 187 |
+
},
|
| 188 |
+
{
|
| 189 |
+
"3": {
|
| 190 |
+
"title": "Instructpix2pix: Learning to follow image editing instructions.",
|
| 191 |
+
"author": "Tim Brooks, Aleksander Holynski, and Alexei A. Efros. 2023.",
|
| 192 |
+
"venue": "Preprint, arXiv:2211.09800.",
|
| 193 |
+
"url": "https://arxiv.org/abs/2211.09800"
|
| 194 |
+
}
|
| 195 |
+
},
|
| 196 |
+
{
|
| 197 |
+
"4": {
|
| 198 |
+
"title": "Universal sentence encoder.",
|
| 199 |
+
"author": "Daniel Cer, Yinfei Yang, Sheng yi Kong, Nan Hua, Nicole Limtiaco, Rhomni St. John, Noah Constant, Mario Guajardo-Cespedes, Steve Yuan, Chris Tar, Yun-Hsuan Sung, Brian Strope, and Ray Kurzweil. 2018.",
|
| 200 |
+
"venue": "Preprint, arXiv:1803.11175.",
|
| 201 |
+
"url": "https://arxiv.org/abs/1803.11175"
|
| 202 |
+
}
|
| 203 |
+
},
|
| 204 |
+
{
|
| 205 |
+
"5": {
|
| 206 |
+
"title": "Minprompt: Graph-based minimal prompt data augmentation for few-shot question answering.",
|
| 207 |
+
"author": "Xiusi Chen, Jyun-Yu Jiang, Wei-Cheng Chang, Cho-Jui Hsieh, Hsiang-Fu Yu, and Wei Wang. 2023.",
|
| 208 |
+
"venue": "Preprint, arXiv:2310.05007.",
|
| 209 |
+
"url": "https://arxiv.org/abs/2310.05007"
|
| 210 |
+
}
|
| 211 |
+
},
|
| 212 |
+
{
|
| 213 |
+
"6": {
|
| 214 |
+
"title": "Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality.",
|
| 215 |
+
"author": "Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E. Gonzalez, Ion Stoica, and Eric P. Xing. 2023.",
|
| 216 |
+
"venue": null,
|
| 217 |
+
"url": "https://lmsys.org/blog/2023-03-30-vicuna/"
|
| 218 |
+
}
|
| 219 |
+
},
|
| 220 |
+
{
|
| 221 |
+
"7": {
|
| 222 |
+
"title": "Instructblip: Towards general-purpose vision-language models with instruction tuning. arxiv 2023.",
|
| 223 |
+
"author": "W Dai, J Li, D Li, AMH Tiong, J Zhao, W Wang, B Li, P Fung, and S Hoi. 2023.",
|
| 224 |
+
"venue": "arXiv preprint arXiv:2305.06500.",
|
| 225 |
+
"url": null
|
| 226 |
+
}
|
| 227 |
+
},
|
| 228 |
+
{
|
| 229 |
+
"8": {
|
| 230 |
+
"title": "Acm code of ethics and professional conduct.",
|
| 231 |
+
"author": "DW Gotterbarn, Bo Brinkman, Catherine Flick, Michael S Kirkpatrick, Keith Miller, Kate Vazansky, and Marty J Wolf. 2018.",
|
| 232 |
+
"venue": null,
|
| 233 |
+
"url": null
|
| 234 |
+
}
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"9": {
|
| 238 |
+
"title": "Making the V in VQA matter: Elevating the role of image understanding in Visual Question Answering.",
|
| 239 |
+
"author": "Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. 2017.",
|
| 240 |
+
"venue": "In Conference on Computer Vision and Pattern Recognition (CVPR).",
|
| 241 |
+
"url": null
|
| 242 |
+
}
|
| 243 |
+
},
|
| 244 |
+
{
|
| 245 |
+
"10": {
|
| 246 |
+
"title": "Mixgen: A new multi-modal data augmentation.",
|
| 247 |
+
"author": "Xiaoshuai Hao, Yi Zhu, Srikar Appalaraju, Aston Zhang, Wanqian Zhang, Bo Li, and Mu Li. 2023.",
|
| 248 |
+
"venue": "Preprint, arXiv:2206.08358.",
|
| 249 |
+
"url": "https://arxiv.org/abs/2206.08358"
|
| 250 |
+
}
|
| 251 |
+
},
|
| 252 |
+
{
|
| 253 |
+
"11": {
|
| 254 |
+
"title": "Gans trained by a two time-scale update rule converge to a local nash equilibrium.",
|
| 255 |
+
"author": "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. 2018.",
|
| 256 |
+
"venue": "Preprint, arXiv:1706.08500.",
|
| 257 |
+
"url": "https://arxiv.org/abs/1706.08500"
|
| 258 |
+
}
|
| 259 |
+
},
|
| 260 |
+
{
|
| 261 |
+
"12": {
|
| 262 |
+
"title": "spaCy: Industrial-strength Natural Language Processing in Python.",
|
| 263 |
+
"author": "Matthew Honnibal, Ines Montani, Sofie Van Landeghem, and Adriane Boyd. 2020.",
|
| 264 |
+
"venue": null,
|
| 265 |
+
"url": "https://doi.org/10.5281/zenodo.1212303"
|
| 266 |
+
}
|
| 267 |
+
},
|
| 268 |
+
{
|
| 269 |
+
"13": {
|
| 270 |
+
"title": "The inaturalist species classification and detection dataset.",
|
| 271 |
+
"author": "Grant Van Horn, Oisin Mac Aodha, Yang Song, Yin Cui, Chen Sun, Alex Shepard, Hartwig Adam, Pietro Perona, and Serge Belongie. 2018.",
|
| 272 |
+
"venue": "Preprint, arXiv:1707.06642.",
|
| 273 |
+
"url": "https://arxiv.org/abs/1707.06642"
|
| 274 |
+
}
|
| 275 |
+
},
|
| 276 |
+
{
|
| 277 |
+
"14": {
|
| 278 |
+
"title": "Aeda: An easier data augmentation technique for text classification.",
|
| 279 |
+
"author": "Akbar Karimi, Leonardo Rossi, and Andrea Prati. 2021.",
|
| 280 |
+
"venue": "Preprint, arXiv:2108.13230.",
|
| 281 |
+
"url": "https://arxiv.org/abs/2108.13230"
|
| 282 |
+
}
|
| 283 |
+
},
|
| 284 |
+
{
|
| 285 |
+
"15": {
|
| 286 |
+
"title": "Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation.",
|
| 287 |
+
"author": "Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. 2022a.",
|
| 288 |
+
"venue": "Preprint, arXiv:2201.12086.",
|
| 289 |
+
"url": "https://arxiv.org/abs/2201.12086"
|
| 290 |
+
}
|
| 291 |
+
},
|
| 292 |
+
{
|
| 293 |
+
"16": {
|
| 294 |
+
"title": "Clip-event: Connecting text and images with event structures.",
|
| 295 |
+
"author": "Manling Li, Ruochen Xu, Shuohang Wang, Luowei Zhou, Xudong Lin, Chenguang Zhu, Michael Zeng, Heng Ji, and Shih-Fu Chang. 2022b.",
|
| 296 |
+
"venue": "In Proc. Conference on Computer Vision and Pattern Recognition (CVPR2022).",
|
| 297 |
+
"url": null
|
| 298 |
+
}
|
| 299 |
+
},
|
| 300 |
+
{
|
| 301 |
+
"17": {
|
| 302 |
+
"title": "Cross-media structured common space for multimedia event extraction.",
|
| 303 |
+
"author": "Manling Li, Alireza Zareian, Qi Zeng, Spencer Whitehead, Di Lu, Heng Ji, and Shih-Fu Chang. 2020.",
|
| 304 |
+
"venue": "In Proc. The 58th Annual Meeting of the Association for Computational Linguistics (ACL2020).",
|
| 305 |
+
"url": null
|
| 306 |
+
}
|
| 307 |
+
},
|
| 308 |
+
{
|
| 309 |
+
"18": {
|
| 310 |
+
"title": "Improved baselines with visual instruction tuning.",
|
| 311 |
+
"author": "Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. 2023a.",
|
| 312 |
+
"venue": "Preprint, arXiv:2310.03744.",
|
| 313 |
+
"url": "https://arxiv.org/abs/2310.03744"
|
| 314 |
+
}
|
| 315 |
+
},
|
| 316 |
+
{
|
| 317 |
+
"19": {
|
| 318 |
+
"title": "Visual instruction tuning.",
|
| 319 |
+
"author": "Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. 2023b.",
|
| 320 |
+
"venue": "Preprint, arXiv:2304.08485.",
|
| 321 |
+
"url": "https://arxiv.org/abs/2304.08485"
|
| 322 |
+
}
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"20": {
|
| 326 |
+
"title": "Learning multimodal data augmentation in feature space.",
|
| 327 |
+
"author": "Zichang Liu, Zhiqiang Tang, Xingjian Shi, Aston Zhang, Mu Li, Anshumali Shrivastava, and Andrew Gordon Wilson. 2022.",
|
| 328 |
+
"venue": "arXiv preprint arXiv:2212.14453.",
|
| 329 |
+
"url": null
|
| 330 |
+
}
|
| 331 |
+
},
|
| 332 |
+
{
|
| 333 |
+
"21": {
|
| 334 |
+
"title": "Reasoning on graphs: Faithful and interpretable large language model reasoning.",
|
| 335 |
+
"author": "LINHAO LUO, Yuan-Fang Li, Reza Haf, and Shirui Pan. 2023.",
|
| 336 |
+
"venue": "In The Twelfth International Conference on Learning Representations.",
|
| 337 |
+
"url": null
|
| 338 |
+
}
|
| 339 |
+
},
|
| 340 |
+
{
|
| 341 |
+
"22": {
|
| 342 |
+
"title": "Camdiff: Camouflage image augmentation via diffusion model.",
|
| 343 |
+
"author": "Xue-Jing Luo, Shuo Wang, Zongwei Wu, Christos Sakaridis, Yun Cheng, Deng-Ping Fan, and Luc Van Gool. 2023.",
|
| 344 |
+
"venue": "Preprint, arXiv:2304.05469.",
|
| 345 |
+
"url": "https://arxiv.org/abs/2304.05469"
|
| 346 |
+
}
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"23": {
|
| 350 |
+
"title": "Trivialaugment: Tuning-free yet state-of-the-art data augmentation.",
|
| 351 |
+
"author": "Samuel G M\u00fcller and Frank Hutter. 2021.",
|
| 352 |
+
"venue": "In Proceedings of the IEEE/CVF international conference on computer vision, pages 774\u2013782.",
|
| 353 |
+
"url": null
|
| 354 |
+
}
|
| 355 |
+
},
|
| 356 |
+
{
|
| 357 |
+
"24": {
|
| 358 |
+
"title": "Trivialaugment: Tuning-free yet state-of-the-art data augmentation.",
|
| 359 |
+
"author": "Samuel G. M\u00fcller and Frank Hutter. 2021.",
|
| 360 |
+
"venue": "Preprint, arXiv:2103.10158.",
|
| 361 |
+
"url": "https://arxiv.org/abs/2103.10158"
|
| 362 |
+
}
|
| 363 |
+
},
|
| 364 |
+
{
|
| 365 |
+
"25": {
|
| 366 |
+
"title": "Gpt-4 technical report.",
|
| 367 |
+
"author": "R OpenAI. 2023.",
|
| 368 |
+
"venue": "ArXiv, 2303.",
|
| 369 |
+
"url": null
|
| 370 |
+
}
|
| 371 |
+
},
|
| 372 |
+
{
|
| 373 |
+
"26": {
|
| 374 |
+
"title": "Unifying large language models and knowledge graphs: A roadmap.",
|
| 375 |
+
"author": "Shirui Pan, Linhao Luo, Yufei Wang, Chen Chen, Jiapu Wang, and Xindong Wu. 2024.",
|
| 376 |
+
"venue": "IEEE Transactions on Knowledge and Data Engineering.",
|
| 377 |
+
"url": null
|
| 378 |
+
}
|
| 379 |
+
},
|
| 380 |
+
{
|
| 381 |
+
"27": {
|
| 382 |
+
"title": "Learning transferable visual models from natural language supervision.",
|
| 383 |
+
"author": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 2021.",
|
| 384 |
+
"venue": "Preprint, arXiv:2103.00020.",
|
| 385 |
+
"url": "https://arxiv.org/abs/2103.00020"
|
| 386 |
+
}
|
| 387 |
+
},
|
| 388 |
+
{
|
| 389 |
+
"28": {
|
| 390 |
+
"title": "Think-on-graph: Deep and responsible reasoning of large language model on knowledge graph.",
|
| 391 |
+
"author": "Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Lionel Ni, Heung-Yeung Shum, and Jian Guo. 2023.",
|
| 392 |
+
"venue": "In The Twelfth International Conference on Learning Representations.",
|
| 393 |
+
"url": null
|
| 394 |
+
}
|
| 395 |
+
},
|
| 396 |
+
{
|
| 397 |
+
"29": {
|
| 398 |
+
"title": "Augmented sbert: Data augmentation method for improving bi-encoders for pairwise sentence scoring tasks.",
|
| 399 |
+
"author": "Nandan Thakur, Nils Reimers, Johannes Daxenberger, and Iryna Gurevych. 2021.",
|
| 400 |
+
"venue": "Preprint, arXiv:2010.08240.",
|
| 401 |
+
"url": "https://arxiv.org/abs/2010.08240"
|
| 402 |
+
}
|
| 403 |
+
},
|
| 404 |
+
{
|
| 405 |
+
"30": {
|
| 406 |
+
"title": "Llama: Open and efficient foundation language models.",
|
| 407 |
+
"author": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. 2023.",
|
| 408 |
+
"venue": "Preprint, arXiv:2302.13971.",
|
| 409 |
+
"url": "https://arxiv.org/abs/2302.13971"
|
| 410 |
+
}
|
| 411 |
+
},
|
| 412 |
+
{
|
| 413 |
+
"31": {
|
| 414 |
+
"title": "Effective data augmentation with diffusion models.",
|
| 415 |
+
"author": "Brandon Trabucco, Kyle Doherty, Max Gurinas, and Ruslan Salakhutdinov. 2023.",
|
| 416 |
+
"venue": "Preprint, arXiv:2302.07944.",
|
| 417 |
+
"url": "https://arxiv.org/abs/2302.07944"
|
| 418 |
+
}
|
| 419 |
+
},
|
| 420 |
+
{
|
| 421 |
+
"32": {
|
| 422 |
+
"title": "Wikidata: a free collaborative knowledgebase.",
|
| 423 |
+
"author": "Denny Vrande\u010di\u0107 and Markus Kr\u00f6tzsch. 2014.",
|
| 424 |
+
"venue": "Commun. ACM, 57(10):78\u201385.",
|
| 425 |
+
"url": "https://doi.org/10.1145/2629489"
|
| 426 |
+
}
|
| 427 |
+
},
|
| 428 |
+
{
|
| 429 |
+
"33": {
|
| 430 |
+
"title": "Towards reporting bias in visual-language datasets: bimodal augmentation by decoupling object-attribute association.",
|
| 431 |
+
"author": "Qiyu Wu, Mengjie Zhao, Yutong He, Lang Huang, Junya Ono, Hiromi Wakaki, and Yuki Mitsufuji. 2023.",
|
| 432 |
+
"venue": "Preprint, arXiv:2310.01330.",
|
| 433 |
+
"url": "https://arxiv.org/abs/2310.01330"
|
| 434 |
+
}
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"34": {
|
| 438 |
+
"title": "Gpt3mix: Leveraging large-scale language models for text augmentation.",
|
| 439 |
+
"author": "Kang Min Yoo, Dongju Park, Jaewook Kang, Sang-Woo Lee, and Woomyeong Park. 2021.",
|
| 440 |
+
"venue": "Preprint, arXiv:2104.08826.",
|
| 441 |
+
"url": "https://arxiv.org/abs/2104.08826"
|
| 442 |
+
}
|
| 443 |
+
},
|
| 444 |
+
{
|
| 445 |
+
"35": {
|
| 446 |
+
"title": "From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions.",
|
| 447 |
+
"author": "Peter Young, Alice Lai, Micah Hodosh, and Julia Hockenmaier. 2014.",
|
| 448 |
+
"venue": "Transactions of the Association for Computational Linguistics, 2:67\u201378.",
|
| 449 |
+
"url": "https://doi.org/10.1162/tacl_a_00166"
|
| 450 |
+
}
|
| 451 |
+
},
|
| 452 |
+
{
|
| 453 |
+
"36": {
|
| 454 |
+
"title": "Bertscore: Evaluating text generation with bert.",
|
| 455 |
+
"author": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020.",
|
| 456 |
+
"venue": "Preprint, arXiv:1904.09675.",
|
| 457 |
+
"url": "https://arxiv.org/abs/1904.09675"
|
| 458 |
+
}
|
| 459 |
+
}
|
| 460 |
+
],
|
| 461 |
+
"url": "http://arxiv.org/html/2408.10086v1"
|
| 462 |
+
}
|
20240819/2408.10153v1.json
ADDED
|
@@ -0,0 +1,365 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Structure-preserving Image Translation for Depth Estimation in Colonoscopy",
|
| 3 |
+
"abstract": "Monocular depth estimation in colonoscopy video aims to overcome the unusual lighting properties of the colonoscopic environment. One of the major challenges in this area is the domain gap between annotated but unrealistic synthetic data and unannotated but realistic clinical data. Previous attempts to bridge this domain gap directly target the depth estimation task itself. We propose a general pipeline of structure-preserving synthetic-to-real (sim2real) image translation (producing a modified version of the input image) to retain depth geometry through the translation process.\nThis allows us to generate large quantities of realistic-looking synthetic images for supervised depth estimation with improved generalization to the clinical domain. We also propose a dataset of hand-picked sequences from clinical colonoscopies to improve the image translation process. We demonstrate the simultaneous realism of the translated images and preservation of depth maps via the performance of downstream depth estimation on various datasets.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Colorectal cancer (CRC) is one of the leading causes of cancer mortality in the United States; the American Cancer Society estimates that there will be over 150,000 new cases and 50,000 deaths in 2024. Increased screening is one of the factors contributing to reductions in mortality [13 ###reference_b13###]. Optical colonoscopy is the gold standard method for CRC screening but its effectiveness is highly dependent upon the skill of the physician performing the examination [9 ###reference_b9###]. Around 20% of potentially pre-cancerous polyps are missed during colonoscopies [12 ###reference_b12###][14 ###reference_b14###].\n3D reconstruction from optical colonoscopy video can improve efficacy via guidance and visualization to the physician, automatic measurements, and autonomous navigation.\nOne of the major challenges in this area is the lack of realistic data suitable for training neural networks to perform depth and pose estimation. While synthetic [10 ###reference_b10###] and phantom [2 ###reference_b2###] datasets exist, they do not accurately represent the reflectance properties of in vivo tissue. Previous approaches towards closing the domain gap [8 ###reference_b8###][11 ###reference_b11###][15 ###reference_b15###] do not target challenging viewpoints making up the majority of colonoscopy videos. In this work, we propose an image translation method that generates realistic-looking video frames from synthetic colonoscopies while preserving the depth information and without requiring complex modeling of mucus and in vivo tissue. In this way, we are able to bridge the gap between unrealistic synthetic data with dense ground truth depth annotation and realistic but un-annotated clinical data to improve depth estimation on unseen clinical data. In addition, we introduce two new datasets of manually selected frames from clinical colonoscopies representing viewpoints that are particularly challenging for depth estimation and downstream reconstruction. This data both improves the realism of our image translation results and provides a dataset against which to test the quality of depth estimation results.\nCode is available at github.com/sherry97/struct-preserving-cyclegan ###reference_egan###\nand data at endoscopography.web.unc.edu ###reference_ography.web.unc.edu###"
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Related Work",
|
| 15 |
+
"text": "Prior datasets targeting reconstruction from colonoscopy come from clinical procedures (EndoMapper [1 ###reference_b1###], Colon10K [7 ###reference_b7###]), fully synthetic procedures (SimCol3D [10 ###reference_b10###], Zhang et al. [18 ###reference_b18###]), or robotic colonoscopy of a silicone phantom model of the colon (C3VD [2 ###reference_b2###]). Clinical data by nature does not have per-frame depth or pose annotations; while synthetic and phantom data have such annotations, the geometry and light reflectance properties of living tissue is challenging to replicate synthetically and therefore the textures present in the synthetic and phantom data are notably different from those observed in clinical practice (Fig. 1 ###reference_###). While the use of image translation to bridge the synthetic to clinical domain gap has been addressed previously (Sec. 2.1 ###reference_###), we propose a general modular framework particularly targeting depth estimation (Sec. 2.2 ###reference_###) on challenging viewpoints.\nThis is the first work that performs structure-preserving image translation from the synthetic to clinical colonoscopy domain without requiring a pre-trained depth estimator or feature extractor in the target clinical domain.\n###figure_1### ###figure_2### ###figure_3### ###figure_4### ###figure_5### ###figure_6###"
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Domain gap",
|
| 21 |
+
"text": "Using image translation for colonoscopic depth estimation, Rau et al. [11 ###reference_b11###] propose image-to-depth translation to directly estimate depths from images. In contrast, Mahmood and Durr [8 ###reference_b8###] combine synthetic depth estimation with real-to-synthetic image translation at inference.\nFor other tasks, Chen et al. [3 ###reference_b3###] propose a structure-preserving image-to-image generative adversarial network (GAN) to improve segmentation using mutual information in the latent encoding. Similarly, Yoon et al. [17 ###reference_b17###] propose using GAN-based dataset augmentation to boost performance.\nFor general-purpose image translation, many previous works build upon CycleGAN [19 ###reference_b19###] due to the structure preservation implicit in the cyclical architecture.\nCheng et al. [4 ###reference_b4###] present a structure-preserving alternative that decomposes style (extracted via a pretrained autoencoder) from structure (extracted via a pretrained monocular depth estimator)."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "Depth estimation",
|
| 27 |
+
"text": "In order to demonstrate the effectiveness of our image translation approach, we use performance on monocular depth estimation as the metric for comparison. Wang et al. [15 ###reference_b15###] propose a self-supervised extension of Monodepth2 [6 ###reference_b6###] for the colonoscopy domain with an iterative refinement step. For general depth estimation, modern Transformer-based methods [5 ###reference_b5###][16 ###reference_b16###] demonstrate high-quality depth estimation results on non-medical data but rely on large training datasets."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Data",
|
| 33 |
+
"text": "Generally, we can categorize the viewpoint of a single frame as axial, oblique, or en face (Fig. 1(b) ###reference_sf2###). Oblique and en face viewpoints can be challenging for depth estimation due to the lack of strong geometric features. However, they make up about 70% of non-obfuscated frames within a colonoscopy video so reliable depth estimation from these views and their subsequent incorporation into reconstruction provides significant additional information about surface geometry over reconstruction from axial views alone.\nIn this work, we introduce two distinct datasets: the first of oblique views and the second of en face views. Both consist of sequences of consecutive frames manually selected from a library of video recordings of full colonoscopy procedures. The datasets have been curated on the basis of the viewpoint of each frame such that a sequence extends as long as each consecutive frame is of the same viewpoint category modulo gaps of up to 30 consecutive frames with excessive obfuscation (e.g. water drops on the lens).\nAll frames are pre-processed in the same manner. Using computed camera intrinsics and the Matlab undistortFisheyeImage function, we warp fisheye projection into a pinhole projection. We then crop the image to remove the unused image area and resize to pixels. The original videos were recorded using CF and PCF series Olympus colonoscopes with a raw image size of .\nThe UNC Office of Human Research Ethics has determined that this work does not constitute human subject research and does not require Internal Review Board approval."
|
| 34 |
+
},
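The preprocessing just described can be sketched in Python, assuming OpenCV's fisheye module as a stand-in for Matlab's undistortFisheyeImage. The intrinsics K, distortion coefficients D, and out_size below are illustrative placeholders (the target resolution is elided in the text), not the paper's calibration values.

import cv2
import numpy as np

# Hypothetical calibration values; real ones come from colonoscope calibration.
K = np.array([[400.0, 0.0, 540.0],
              [0.0, 400.0, 540.0],
              [0.0, 0.0, 1.0]])
D = np.array([0.05, -0.01, 0.0, 0.0])  # fisheye distortion coefficients

def preprocess(frame: np.ndarray, out_size: int = 256) -> np.ndarray:
    """Warp fisheye to a pinhole projection, crop the unused border, resize."""
    h, w = frame.shape[:2]
    # Estimate a pinhole camera matrix for the undistorted view.
    new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
        K, D, (w, h), np.eye(3), balance=0.0)
    undistorted = cv2.fisheye.undistortImage(frame, K, D, Knew=new_K)
    # Center-crop to a square, then resize to the network input size.
    side = min(h, w)
    y0, x0 = (h - side) // 2, (w - side) // 2
    return cv2.resize(undistorted[y0:y0 + side, x0:x0 + side],
                      (out_size, out_size))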
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Methods",
|
| 39 |
+
"text": "We demonstrate the realism of the image translation result and effectiveness of our proposed structure-preserving loss term via downstream depth estimation. Fig. 2 ###reference_### illustrates our framework. Our image translation result is additionally improved with the use of our proposed data over pre-existing datasets."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "5",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Results",
|
| 45 |
+
"text": "###figure_7### ###figure_8### ###figure_9### ###figure_10### ###figure_11### ###figure_12### ###figure_13### ###figure_14### ###figure_15### ###figure_16### ###figure_17### ###figure_18### ###figure_19### ###figure_20### ###figure_21### ###figure_22### ###figure_23### ###figure_24### ###figure_25### ###figure_26### ###figure_27### ###figure_28### ###figure_29### ###figure_30###"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "5.1",
|
| 49 |
+
"parent_section_id": "5",
|
| 50 |
+
"section_name": "Image translation",
|
| 51 |
+
"text": "We find that the translation result (Fig. 3 ###reference_###) has both improved texture realism and retains the overall geometry of the input image. Most notably, the translation adds the specular points missing from SimCol3D without explicit representation. The specularity is distributed in a manner consistent with our expectation that surfaces closer to the camera and having surface normal directions parallel with the viewing direction will exhibit more specular effects than those either farther from the camera or with normal direction different from that of the viewing direction.\nIn Table 1 ###reference_###, we compare translation metrics against translation with (vanilla CycleGAN) and find the metrics support our perception of improved translation results when ."
|
| 52 |
+
},
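The Table 1 metrics can be reproduced with standard tooling. A minimal sketch assuming torchmetrics, where real_frames and translated_frames are illustrative names for uint8 image tensors of shape (N, 3, H, W) drawn from the oblique dataset and the generator output, respectively:

import torch
from torchmetrics.image.fid import FrechetInceptionDistance
from torchmetrics.image.kid import KernelInceptionDistance

def translation_metrics(real_frames: torch.Tensor,
                        translated_frames: torch.Tensor):
    # Both metrics compare Inception features of real vs. generated frames.
    fid = FrechetInceptionDistance(feature=2048)
    kid = KernelInceptionDistance(subset_size=50)  # subset_size must be <= N
    for metric in (fid, kid):
        metric.update(real_frames, real=True)
        metric.update(translated_frames, real=False)
    kid_mean, _kid_std = kid.compute()
    return fid.compute().item(), kid_mean.item()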
|
| 53 |
+
{
|
| 54 |
+
"section_id": "5.2",
|
| 55 |
+
"parent_section_id": "5",
|
| 56 |
+
"section_name": "Depth estimation",
|
| 57 |
+
"text": "We measure depth estimation performance on C3VD [2 ###reference_b2###] for comparison against baseline models due to its better realism compared to other options but note that the textures and geometries represented in that dataset remain different from those observed in clinical practice. Our qualitative assessment of depth predictions on our proposed oblique and full en face datasets demonstrate a notable performance gap on realistic images.\nIn Table 3 ###reference_###, we provide metrics computed after median rescaling to adjust depth scale across models. For zero-shot models (relying on generalization), we find that our framework produces the best performance in most metrics. We also find that the performance is similar across various models and training datasets. We conclude that the performance on this dataset is satisfactory given the architecture and simplicity of the evaluation dataset, and look for a larger performance gap on more challenging clinical frames.\nIn Fig. 5 ###reference_###, we show a few examples of depth estimation using NormDepth and our framework evaluated on images from the proposed oblique test partition (additional examples in Fig. S.6 ###reference_###). We have not used masking to prevent depth distortions at specular points. Overall, we see that NormDepth is biased towards predicting a depth depression near the center of the frame and poor predictions near occlusion boundaries. Meanwhile, the baseline model produces significant and repeated errors in the depth map at specular points. Our proposed model produces the best representation of rounded haustral ridges and better distinction between structures. Compared to Ours and Ours, our model produces depths with stronger discontinuities at occlusion boundaries and overall captures a more nuanced and accurate surface geometry.\nIn Fig. 5 ###reference_###, we show a few examples of depth estimation using NormDepth and our framework on images from the proposed en face dataset (additional examples in Fig. S.7 ###reference_###).\nIn these examples, the bias of NormDepth towards predicting a center depth depression is particularly evident, as are the failures of the baseline model in specular areas.\nDue to the nature of this dataset, there is greater representation of surfaces with strong visual texture from vasculature. Thus we can see that our proposed method has overall improved representation of the overall surface geometry compared to ablations but can also produce distortions to the depth map at regions with strong vascular texture.\n###figure_31### ###figure_32### ###figure_33### ###figure_34### ###figure_35### ###figure_36###"
|
| 58 |
+
},
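A sketch of the evaluation protocol implied by Table 3: predictions are median-rescaled to the ground truth before computing the standard monocular-depth metrics (RMSE, absolute relative error, and delta-threshold accuracies). The exact masking and rescaling details are assumptions, not taken from the paper.

import numpy as np

def depth_metrics(pred: np.ndarray, gt: np.ndarray, valid: np.ndarray):
    """Median-rescaled depth metrics over valid (boolean mask) pixels."""
    pred, gt = pred[valid], gt[valid]
    pred = pred * np.median(gt) / np.median(pred)  # median rescaling
    rmse = np.sqrt(np.mean((pred - gt) ** 2))
    abs_rel = np.mean(np.abs(pred - gt) / gt)
    ratio = np.maximum(pred / gt, gt / pred)
    deltas = [float(np.mean(ratio < 1.25 ** k)) for k in (1, 2, 3)]
    return rmse, abs_rel, deltas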
|
| 59 |
+
{
|
| 60 |
+
"section_id": "6",
|
| 61 |
+
"parent_section_id": null,
|
| 62 |
+
"section_name": "Conclusions",
|
| 63 |
+
"text": "We have demonstrated that structure-preserving sim2real image translation improves monocular depth estimation in challenging colonoscopic frames. To aid this task, we introduce two datasets of hand-picked sequences from clinical data focusing on viewpoints that are under-represented in existing datasets. The image translation results improve texture realism (especially for specular points) while retaining sufficient depth geometry for successful subsequent training of depth estimator networks. We provide evaluation of depth estimation on C3VD and qualitative evaluations on our proposed datasets, finding significant performance improvements on challenging frames using this framework."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "6.1",
|
| 67 |
+
"parent_section_id": "6",
|
| 68 |
+
"section_name": "Limitations and Future Work",
|
| 69 |
+
"text": "Depth distortions in areas with strongly visible vasculature and few geometric features could be ameliorated by incorporating additional data into the translation target. Future work could focus on applying this approach to pose estimation or other (non-Monodepth2) depth estimation architectures."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "6.1.1",
|
| 73 |
+
"parent_section_id": "6.1",
|
| 74 |
+
"section_name": "6.1.1 Acknowledgements",
|
| 75 |
+
"text": "We thank Stephen Pizer, Sam Ehrenstein, and Julian Rosenman for helpful discussions. Research funding was provided by Olympus Corporation."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "6.1.2",
|
| 79 |
+
"parent_section_id": "6.1",
|
| 80 |
+
"section_name": "6.1.2 \\discintname",
|
| 81 |
+
"text": "The authors have no competing interests to declare that are relevant to the content of this article."
|
| 82 |
+
}
|
| 83 |
+
],
|
| 84 |
+
"appendix": [],
|
| 85 |
+
"tables": {
|
| 86 |
+
"1": {
|
| 87 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T1\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S5.T1.8.2.1\" style=\"font-size:90%;\">Table 1</span>: </span><span class=\"ltx_text\" id=\"S5.T1.2.1\" style=\"font-size:90%;\">Image translation metrics against oblique dataset. Using helps the model produce images more similar to the distribution of test images.</span></figcaption>\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S5.T1.6\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S5.T1.4.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T1.4.2.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T1.4.2.3.1\">Model</span></th>\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S5.T1.3.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T1.3.1.1.1\">\n<span class=\"ltx_p\" id=\"S5.T1.3.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T1.3.1.1.1.1.1\">Frechet Inception Distance </span></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S5.T1.4.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T1.4.2.2.1\">\n<span class=\"ltx_p\" id=\"S5.T1.4.2.2.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T1.4.2.2.1.1.1\">Kernel Inception Distance </span></span>\n</span>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T1.5.3\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T1.5.3.2\">CycleGAN</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T1.5.3.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T1.5.3.3.1\">\n<span class=\"ltx_p\" id=\"S5.T1.5.3.3.1.1\">2.225</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T1.5.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T1.5.3.1.1\">\n<span class=\"ltx_p\" id=\"S5.T1.5.3.1.1.1\"></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.6.4\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_r\" id=\"S5.T1.6.4.2\">Ours</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r\" id=\"S5.T1.6.4.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T1.6.4.3.1\">\n<span class=\"ltx_p\" id=\"S5.T1.6.4.3.1.1\">0.300</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r\" id=\"S5.T1.6.4.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T1.6.4.1.1\">\n<span class=\"ltx_p\" id=\"S5.T1.6.4.1.1.1\"></span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 88 |
+
"capture": "Table 1: Image translation metrics against oblique dataset. Using helps the model produce images more similar to the distribution of test images."
|
| 89 |
+
},
|
| 90 |
+
"2": {
|
| 91 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T3\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S5.T3.13.1.1\" style=\"font-size:90%;\">Table 2</span>: </span><span class=\"ltx_text\" id=\"S5.T3.14.2\" style=\"font-size:90%;\">Ablations on translation target dataset and use of MI loss. All depth estimations use Monodepth2 architecture, varying in input data.</span></figcaption><div class=\"ltx_flex_figure\">\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<table class=\"ltx_tabular ltx_figure_panel ltx_align_middle\" id=\"S5.T3.3\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T3.3.4.1\">\n<td class=\"ltx_td ltx_align_justify ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T3.3.4.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.3.4.1.1.1\">\n<span class=\"ltx_p\" id=\"S5.T3.3.4.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.3.4.1.1.1.1.1\">Depth Estimation Input</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.3.4.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.3.4.1.2.1\">\n<span class=\"ltx_p\" id=\"S5.T3.3.4.1.2.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.3.4.1.2.1.1.1\">Translation Domain B</span></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.3.4.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.3.4.1.3.1\">\n<span class=\"ltx_p\" id=\"S5.T3.3.4.1.3.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.3.4.1.3.1.1.1\">Uses MI Loss</span></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.3.5.2\">\n<td class=\"ltx_td ltx_align_justify ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T3.3.5.2.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.3.5.2.1.1\">\n<span class=\"ltx_p\" id=\"S5.T3.3.5.2.1.1.1\">Baseline (no translation)</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.3.5.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.3.5.2.2.1\">\n<span class=\"ltx_p\" id=\"S5.T3.3.5.2.2.1.1\">-</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.3.5.2.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.3.5.2.3.1\">\n<span class=\"ltx_p\" id=\"S5.T3.3.5.2.3.1.1\">-</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.3.6.3\">\n<td class=\"ltx_td ltx_align_justify ltx_border_l ltx_border_r\" id=\"S5.T3.3.6.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.3.6.3.1.1\">\n<span class=\"ltx_p\" id=\"S5.T3.3.6.3.1.1.1\">Ours</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r\" id=\"S5.T3.3.6.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.3.6.3.2.1\">\n<span class=\"ltx_p\" id=\"S5.T3.3.6.3.2.1.1\">oblique</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r\" id=\"S5.T3.3.6.3.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.3.6.3.3.1\">\n<span class=\"ltx_p\" id=\"S5.T3.3.6.3.3.1.1\">\u2713</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1\">\n<td class=\"ltx_td ltx_align_justify ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T3.1.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.1.1.1.1\">\n<span class=\"ltx_p\" id=\"S5.T3.1.1.1.1.1\">Ours</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" 
id=\"S5.T3.1.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.1.1.2.1\">\n<span class=\"ltx_p\" id=\"S5.T3.1.1.2.1.1\">oblique</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.1.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.1.1.3.1\">\n<span class=\"ltx_p\" id=\"S5.T3.1.1.3.1.1\">-</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.2.2\">\n<td class=\"ltx_td ltx_align_justify ltx_border_l ltx_border_r\" id=\"S5.T3.2.2.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.2.2.1.1\">\n<span class=\"ltx_p\" id=\"S5.T3.2.2.1.1.1\">Ours</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r\" id=\"S5.T3.2.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.2.2.2.1\">\n<span class=\"ltx_p\" id=\"S5.T3.2.2.2.1.1\">Colon10K</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r\" id=\"S5.T3.2.2.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.2.2.3.1\">\n<span class=\"ltx_p\" id=\"S5.T3.2.2.3.1.1\">\u2713</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.3.3\">\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_l ltx_border_r\" id=\"S5.T3.3.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.3.3.1.1\">\n<span class=\"ltx_p\" id=\"S5.T3.3.3.1.1.1\">Ours</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r\" id=\"S5.T3.3.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.3.3.2.1\">\n<span class=\"ltx_p\" id=\"S5.T3.3.3.2.1.1\">C3VD</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r\" id=\"S5.T3.3.3.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.3.3.3.1\">\n<span class=\"ltx_p\" id=\"S5.T3.3.3.3.1.1\">\u2713</span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S5.T3.15.1.1\" style=\"font-size:90%;\">Table 3</span>: </span><span class=\"ltx_text\" id=\"S5.T3.16.2\" style=\"font-size:90%;\">Depth evaluation on C3VD (mm). Best categorical performance highlighted. Multi-shot models train on C3VD while zero-shot rely on generalization. 
On easy data like C3VD, all experiments perform similarly.</span></figcaption><div class=\"ltx_flex_figure\">\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<table class=\"ltx_tabular ltx_figure_panel ltx_guessed_headers ltx_align_middle\" id=\"S5.T3.11\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T3.8.5\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T3.8.5.6\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.8.5.6.1\">\n<span class=\"ltx_p\" id=\"S5.T3.8.5.6.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.8.5.6.1.1.1\">Category</span></span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.8.5.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.8.5.7.1\">Model</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.4.1.1\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.4.1.1.1\">RMSE</span> \n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.5.2.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.5.2.2.1\">Abs</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.6.3.3\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.7.4.4\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.8.5.5\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.11.9.1\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T3.11.9.1.1\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.11.9.1.2\">Monodepth2 <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10153v1#bib.bib5\" title=\"\">5</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.11.9.1.3\">18.640</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.11.9.1.4\">0.297</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.11.9.1.5\">0.490</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.11.9.1.6\">0.731</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.11.9.1.7\">0.861</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.11.10.2\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_r\" id=\"S5.T3.11.10.2.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.11.10.2.1.1\">\n<span class=\"ltx_p\" id=\"S5.T3.11.10.2.1.1.1\">Multi-shot</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.10.2.2\">UNet <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10153v1#bib.bib5\" title=\"\">5</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.10.2.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.11.10.2.3.1\">5.520</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.10.2.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.11.10.2.4.1\">0.090</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.10.2.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.11.10.2.5.1\">0.917</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.10.2.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.11.10.2.6.1\">0.994</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" 
id=\"S5.T3.11.10.2.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.11.10.2.7.1\">0.999</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.9.6\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_l ltx_border_r\" id=\"S5.T3.9.6.2\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.9.6.1\">Ours\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.9.6.3\">7.250</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.9.6.4\">0.150</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.9.6.5\">0.794</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.9.6.6\">0.968</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.9.6.7\">0.996</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.11.11.3\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T3.11.11.3.1\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.11.11.3.2\">NormDepth <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10153v1#bib.bib15\" title=\"\">15</a>]</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.11.11.3.3\">7.401</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.11.11.3.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.11.11.3.4.1\">0.169</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.11.11.3.5\">0.731</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.11.11.3.6\">0.948</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T3.11.11.3.7\">0.997</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.11.12.4\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_l ltx_border_r\" id=\"S5.T3.11.12.4.1\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.12.4.2\">Baseline</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.12.4.3\">9.847</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.12.4.4\">0.205</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.12.4.5\">0.626</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.12.4.6\">0.934</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.12.4.7\">0.991</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.10.7\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_r\" id=\"S5.T3.10.7.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.10.7.2.1\">\n<span class=\"ltx_p\" id=\"S5.T3.10.7.2.1.1\">Zero-shot</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.10.7.1\">Ours\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.10.7.3\">8.089</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.10.7.4\">0.174</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.10.7.5\">0.735</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.10.7.6\">0.958</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.10.7.7\">0.995</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.11.8\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_l ltx_border_r\" id=\"S5.T3.11.8.2\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.8.1\">Ours\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.8.3\">7.636</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" 
id=\"S5.T3.11.8.4\">0.174</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.8.5\">0.730</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.8.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.11.8.6.1\">0.960</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T3.11.8.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.11.8.7.1\">0.998</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.11.13.5\">\n<th class=\"ltx_td ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_r\" id=\"S5.T3.11.13.5.1\"></th>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S5.T3.11.13.5.2\">Ours</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S5.T3.11.13.5.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.11.13.5.3.1\">7.209</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S5.T3.11.13.5.4\">0.174</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S5.T3.11.13.5.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T3.11.13.5.5.1\">0.738</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S5.T3.11.13.5.6\">0.948</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S5.T3.11.13.5.7\">0.994</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n</figure>",
|
| 92 |
+
"capture": "Table 2: Ablations on translation target dataset and use of MI loss. All depth estimations use Monodepth2 architecture, varying in input data."
|
| 93 |
+
}
|
| 94 |
+
},
|
| 95 |
+
"image_paths": {
|
| 96 |
+
"1(a)": {
|
| 97 |
+
"figure_path": "2408.10153v1_figure_1(a).png",
|
| 98 |
+
"caption": "(a) Textures from SimCol3D [10] (left), C3VD [2] (center), and proposed oblique dataset (right).\nFigure 1: Sample frames from various datasets.",
|
| 99 |
+
"url": "http://arxiv.org/html/2408.10153v1/x3.jpg"
|
| 100 |
+
},
|
| 101 |
+
"1(b)": {
|
| 102 |
+
"figure_path": "2408.10153v1_figure_1(b).png",
|
| 103 |
+
"caption": "(b) Viewpoint categories in colonoscopy: axial (left, Colon10K [7]), oblique (center), and en face (right).\nFigure 1: Sample frames from various datasets.",
|
| 104 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/data_samples/enface_001_frame023765.jpg"
|
| 105 |
+
},
|
| 106 |
+
"2": {
|
| 107 |
+
"figure_path": "2408.10153v1_figure_2.png",
|
| 108 |
+
"caption": "Figure 2: Image translation framework with image domains A\ud835\udc34Aitalic_A and B\ud835\udc35Bitalic_B, generators G:A\u2192B:\ud835\udc3a\u2192\ud835\udc34\ud835\udc35G:A\\rightarrow Bitalic_G : italic_A \u2192 italic_B and F:B\u2192A:\ud835\udc39\u2192\ud835\udc35\ud835\udc34F:B\\rightarrow Aitalic_F : italic_B \u2192 italic_A, and discriminators DAsubscript\ud835\udc37\ud835\udc34D_{A}italic_D start_POSTSUBSCRIPT italic_A end_POSTSUBSCRIPT and DBsubscript\ud835\udc37\ud835\udc35D_{B}italic_D start_POSTSUBSCRIPT italic_B end_POSTSUBSCRIPT. Let a\u2208A,b\u2208Bformulae-sequence\ud835\udc4e\ud835\udc34\ud835\udc4f\ud835\udc35a\\in A,b\\in Bitalic_a \u2208 italic_A , italic_b \u2208 italic_B denote data samples and let D^asubscript^\ud835\udc37\ud835\udc4e\\hat{D}_{a}over^ start_ARG italic_D end_ARG start_POSTSUBSCRIPT italic_a end_POSTSUBSCRIPT denote the depth map corresponding to sample a\ud835\udc4eaitalic_a. Downstream depth estimation uses output of generator G\u2062(A)\ud835\udc3a\ud835\udc34G(A)italic_G ( italic_A ).",
|
| 109 |
+
"url": "http://arxiv.org/html/2408.10153v1/x4.png"
|
| 110 |
+
},
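Reading off the notation in the Figure 2 caption (generators G: A -> B and F: B -> A, discriminators D_A and D_B), the training objective is a CycleGAN-style loss plus a structure-preserving term on G(a). The sketch below is a hedged reconstruction: the structure term is a simple image-gradient proxy standing in for the mutual-information loss ablated in Table 2, and the weights lam_cyc and lam_struct are placeholders.

import torch
import torch.nn.functional as F_nn  # avoid clashing with generator F

def structure_loss(fake_b, a):
    # Placeholder structure term: match image gradients of the translated
    # frame to the source frame (the paper's actual term is MI-based).
    def grads(x):
        g = x.mean(1, keepdim=True)
        return g[..., :, 1:] - g[..., :, :-1], g[..., 1:, :] - g[..., :-1, :]
    gx_f, gy_f = grads(fake_b)
    gx_a, gy_a = grads(a)
    return F_nn.l1_loss(gx_f, gx_a) + F_nn.l1_loss(gy_f, gy_a)

def total_loss(G, F, D_A, D_B, a, b, lam_cyc=10.0, lam_struct=1.0):
    fake_b, fake_a = G(a), F(b)
    # Least-squares adversarial losses pushing both generators toward
    # their target domains.
    adv = (F_nn.mse_loss(D_B(fake_b), torch.ones_like(D_B(fake_b)))
           + F_nn.mse_loss(D_A(fake_a), torch.ones_like(D_A(fake_a))))
    # Cycle consistency: F(G(a)) should recover a, and G(F(b)) recover b.
    cyc = F_nn.l1_loss(F(fake_b), a) + F_nn.l1_loss(G(fake_a), b)
    # Structure preservation ties the translated frame G(a) to its source.
    return adv + lam_cyc * cyc + lam_struct * structure_loss(fake_b, a)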
|
| 111 |
+
"3(a)": {
|
| 112 |
+
"figure_path": "2408.10153v1_figure_3(a).png",
|
| 113 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 114 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/Frames_B12_FrameBuffer_0087.png"
|
| 115 |
+
},
|
| 116 |
+
"3(b)": {
|
| 117 |
+
"figure_path": "2408.10153v1_figure_3(b).png",
|
| 118 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 119 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/T_Frames_B12_FrameBuffer_0087.png"
|
| 120 |
+
},
|
| 121 |
+
"3(c)": {
|
| 122 |
+
"figure_path": "2408.10153v1_figure_3(c).png",
|
| 123 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 124 |
+
"url": "http://arxiv.org/html/2408.10153v1/"
|
| 125 |
+
},
|
| 126 |
+
"3(d)": {
|
| 127 |
+
"figure_path": "2408.10153v1_figure_3(d).png",
|
| 128 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 129 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/T3_Frames_B12_FrameBuffer_0087.png"
|
| 130 |
+
},
|
| 131 |
+
"3(e)": {
|
| 132 |
+
"figure_path": "2408.10153v1_figure_3(e).png",
|
| 133 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 134 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/Frames_B12_FrameBuffer_1011.png"
|
| 135 |
+
},
|
| 136 |
+
"3(f)": {
|
| 137 |
+
"figure_path": "2408.10153v1_figure_3(f).png",
|
| 138 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 139 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/T_Frames_B12_FrameBuffer_1011.png"
|
| 140 |
+
},
|
| 141 |
+
"3(g)": {
|
| 142 |
+
"figure_path": "2408.10153v1_figure_3(g).png",
|
| 143 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 144 |
+
"url": "http://arxiv.org/html/2408.10153v1/"
|
| 145 |
+
},
|
| 146 |
+
"3(h)": {
|
| 147 |
+
"figure_path": "2408.10153v1_figure_3(h).png",
|
| 148 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 149 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/T3_Frames_B12_FrameBuffer_1011.png"
|
| 150 |
+
},
|
| 151 |
+
"3(i)": {
|
| 152 |
+
"figure_path": "2408.10153v1_figure_3(i).png",
|
| 153 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 154 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/Frames_B13_FrameBuffer_0110.png"
|
| 155 |
+
},
|
| 156 |
+
"3(j)": {
|
| 157 |
+
"figure_path": "2408.10153v1_figure_3(j).png",
|
| 158 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 159 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/T_Frames_B13_FrameBuffer_0110.png"
|
| 160 |
+
},
|
| 161 |
+
"3(k)": {
|
| 162 |
+
"figure_path": "2408.10153v1_figure_3(k).png",
|
| 163 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 164 |
+
"url": "http://arxiv.org/html/2408.10153v1/"
|
| 165 |
+
},
|
| 166 |
+
"3(l)": {
|
| 167 |
+
"figure_path": "2408.10153v1_figure_3(l).png",
|
| 168 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 169 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/T3_Frames_B13_FrameBuffer_0110.png"
|
| 170 |
+
},
|
| 171 |
+
"3(m)": {
|
| 172 |
+
"figure_path": "2408.10153v1_figure_3(m).png",
|
| 173 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 174 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/Frames_B3_FrameBuffer_0914.png"
|
| 175 |
+
},
|
| 176 |
+
"3(n)": {
|
| 177 |
+
"figure_path": "2408.10153v1_figure_3(n).png",
|
| 178 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 179 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/T_Frames_B3_FrameBuffer_0914.png"
|
| 180 |
+
},
|
| 181 |
+
"3(o)": {
|
| 182 |
+
"figure_path": "2408.10153v1_figure_3(o).png",
|
| 183 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 184 |
+
"url": "http://arxiv.org/html/2408.10153v1/"
|
| 185 |
+
},
|
| 186 |
+
"3(p)": {
|
| 187 |
+
"figure_path": "2408.10153v1_figure_3(p).png",
|
| 188 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 189 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/T3_Frames_B3_FrameBuffer_0914.png"
|
| 190 |
+
},
|
| 191 |
+
"3(q)": {
|
| 192 |
+
"figure_path": "2408.10153v1_figure_3(q).png",
|
| 193 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 194 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/Frames_B4_FrameBuffer_0237.png"
|
| 195 |
+
},
|
| 196 |
+
"3(r)": {
|
| 197 |
+
"figure_path": "2408.10153v1_figure_3(r).png",
|
| 198 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 199 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/T_Frames_B4_FrameBuffer_0237.png"
|
| 200 |
+
},
|
| 201 |
+
"3(s)": {
|
| 202 |
+
"figure_path": "2408.10153v1_figure_3(s).png",
|
| 203 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 204 |
+
"url": "http://arxiv.org/html/2408.10153v1/"
|
| 205 |
+
},
|
| 206 |
+
"3(t)": {
|
| 207 |
+
"figure_path": "2408.10153v1_figure_3(t).png",
|
| 208 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 209 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/T3_Frames_B4_FrameBuffer_0237.png"
|
| 210 |
+
},
|
| 211 |
+
"3(u)": {
|
| 212 |
+
"figure_path": "2408.10153v1_figure_3(u).png",
|
| 213 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 214 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/Frames_B9_FrameBuffer_1119.png"
|
| 215 |
+
},
|
| 216 |
+
"3(v)": {
|
| 217 |
+
"figure_path": "2408.10153v1_figure_3(v).png",
|
| 218 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 219 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/T_Frames_B9_FrameBuffer_1119.png"
|
| 220 |
+
},
|
| 221 |
+
"3(w)": {
|
| 222 |
+
"figure_path": "2408.10153v1_figure_3(w).png",
|
| 223 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 224 |
+
"url": "http://arxiv.org/html/2408.10153v1/"
|
| 225 |
+
},
|
| 226 |
+
"3(x)": {
|
| 227 |
+
"figure_path": "2408.10153v1_figure_3(x).png",
|
| 228 |
+
"caption": "Figure 3: Examples comparing the SimCol3D input frame, our translation, closest image in oblique dataset via SSIM, and translation with vanilla CycleGAN.",
|
| 229 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/translation_demo/T3_Frames_B9_FrameBuffer_1119.png"
|
| 230 |
+
},
|
| 231 |
+
"4(a)": {
|
| 232 |
+
"figure_path": "2408.10153v1_figure_4(a).png",
|
| 233 |
+
"caption": "Figure 4: Depth estimation on oblique dataset. Boxes highlight differences. Image translation framework improves monocular depth estimation in general, with best performance using our proposed dataset as translation target.",
|
| 234 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug18_09-06-42_005_000006_combo.png"
|
| 235 |
+
},
|
| 236 |
+
"4(b)": {
|
| 237 |
+
"figure_path": "2408.10153v1_figure_4(b).png",
|
| 238 |
+
"caption": "Figure 4: Depth estimation on oblique dataset. Boxes highlight differences. Image translation framework improves monocular depth estimation in general, with best performance using our proposed dataset as translation target.",
|
| 239 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug18_15-54-30_002_000000_combo.png"
|
| 240 |
+
},
|
| 241 |
+
"4(c)": {
|
| 242 |
+
"figure_path": "2408.10153v1_figure_4(c).png",
|
| 243 |
+
"caption": "Figure 4: Depth estimation on oblique dataset. Boxes highlight differences. Image translation framework improves monocular depth estimation in general, with best performance using our proposed dataset as translation target.",
|
| 244 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug18_15-54-30_002_000175_combo.png"
|
| 245 |
+
},
|
| 246 |
+
"4(d)": {
|
| 247 |
+
"figure_path": "2408.10153v1_figure_4(d).png",
|
| 248 |
+
"caption": "Figure 4: Depth estimation on oblique dataset. Boxes highlight differences. Image translation framework improves monocular depth estimation in general, with best performance using our proposed dataset as translation target.",
|
| 249 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-004_000000_combo.png"
|
| 250 |
+
},
|
| 251 |
+
"4(e)": {
|
| 252 |
+
"figure_path": "2408.10153v1_figure_4(e).png",
|
| 253 |
+
"caption": "Figure 4: Depth estimation on oblique dataset. Boxes highlight differences. Image translation framework improves monocular depth estimation in general, with best performance using our proposed dataset as translation target.",
|
| 254 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-001_000058_combo.png"
|
| 255 |
+
},
|
| 256 |
+
"4(f)": {
|
| 257 |
+
"figure_path": "2408.10153v1_figure_4(f).png",
|
| 258 |
+
"caption": "Figure 4: Depth estimation on oblique dataset. Boxes highlight differences. Image translation framework improves monocular depth estimation in general, with best performance using our proposed dataset as translation target.",
|
| 259 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-008_000073_combo.png"
|
| 260 |
+
},
|
| 261 |
+
"5(a)": {
|
| 262 |
+
"figure_path": "2408.10153v1_figure_5(a).png",
|
| 263 |
+
"caption": "Figure 6: Depth estimation on additional oblique examples.",
|
| 264 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug25_11-25-16_002_000548_combo.png"
|
| 265 |
+
},
|
| 266 |
+
"5(b)": {
|
| 267 |
+
"figure_path": "2408.10153v1_figure_5(b).png",
|
| 268 |
+
"caption": "Figure 6: Depth estimation on additional oblique examples.",
|
| 269 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug25_11-25-16_002_000167_combo.png"
|
| 270 |
+
},
|
| 271 |
+
"5(c)": {
|
| 272 |
+
"figure_path": "2408.10153v1_figure_5(c).png",
|
| 273 |
+
"caption": "Figure 6: Depth estimation on additional oblique examples.",
|
| 274 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug18_15-54-30_002_000191_combo.png"
|
| 275 |
+
},
|
| 276 |
+
"5(d)": {
|
| 277 |
+
"figure_path": "2408.10153v1_figure_5(d).png",
|
| 278 |
+
"caption": "Figure 6: Depth estimation on additional oblique examples.",
|
| 279 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug18_09-06-42_005_000133_combo.png"
|
| 280 |
+
},
|
| 281 |
+
"5(e)": {
|
| 282 |
+
"figure_path": "2408.10153v1_figure_5(e).png",
|
| 283 |
+
"caption": "Figure 6: Depth estimation on additional oblique examples.",
|
| 284 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug18_09-06-42_005_000264_combo.png"
|
| 285 |
+
},
|
| 286 |
+
"5(f)": {
|
| 287 |
+
"figure_path": "2408.10153v1_figure_5(f).png",
|
| 288 |
+
"caption": "Figure 6: Depth estimation on additional oblique examples.",
|
| 289 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug25_11-25-16_002_000354_combo.png"
|
| 290 |
+
},
|
| 291 |
+
"5(g)": {
|
| 292 |
+
"figure_path": "2408.10153v1_figure_5(g).png",
|
| 293 |
+
"caption": "Figure 6: Depth estimation on additional oblique examples.",
|
| 294 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug25_14-47-06_002_000000_combo.png"
|
| 295 |
+
},
|
| 296 |
+
"5(h)": {
|
| 297 |
+
"figure_path": "2408.10153v1_figure_5(h).png",
|
| 298 |
+
"caption": "Figure 6: Depth estimation on additional oblique examples.",
|
| 299 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug25_14-47-06_002_000182_combo.png"
|
| 300 |
+
},
|
| 301 |
+
"5(i)": {
|
| 302 |
+
"figure_path": "2408.10153v1_figure_5(i).png",
|
| 303 |
+
"caption": "Figure 6: Depth estimation on additional oblique examples.",
|
| 304 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug25_15-40-10_010_000024_combo.png"
|
| 305 |
+
},
|
| 306 |
+
"5(j)": {
|
| 307 |
+
"figure_path": "2408.10153v1_figure_5(j).png",
|
| 308 |
+
"caption": "Figure 6: Depth estimation on additional oblique examples.",
|
| 309 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/Auto_A_Aug25_15-40-10_010_000118_combo.png"
|
| 310 |
+
},
|
| 311 |
+
"6(a)": {
|
| 312 |
+
"figure_path": "2408.10153v1_figure_6(a).png",
|
| 313 |
+
"caption": "Figure 7: Depth estimation on additional en face examples.",
|
| 314 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-001_000031_combo.png"
|
| 315 |
+
},
|
| 316 |
+
"6(b)": {
|
| 317 |
+
"figure_path": "2408.10153v1_figure_6(b).png",
|
| 318 |
+
"caption": "Figure 7: Depth estimation on additional en face examples.",
|
| 319 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-010_000002_combo.png"
|
| 320 |
+
},
|
| 321 |
+
"6(c)": {
|
| 322 |
+
"figure_path": "2408.10153v1_figure_6(c).png",
|
| 323 |
+
"caption": "Figure 7: Depth estimation on additional en face examples.",
|
| 324 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-014_000082_combo.png"
|
| 325 |
+
},
|
| 326 |
+
"6(d)": {
|
| 327 |
+
"figure_path": "2408.10153v1_figure_6(d).png",
|
| 328 |
+
"caption": "Figure 7: Depth estimation on additional en face examples.",
|
| 329 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-005_000000_combo.png"
|
| 330 |
+
},
|
| 331 |
+
"6(e)": {
|
| 332 |
+
"figure_path": "2408.10153v1_figure_6(e).png",
|
| 333 |
+
"caption": "Figure 7: Depth estimation on additional en face examples.",
|
| 334 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-006_000000_combo.png"
|
| 335 |
+
},
|
| 336 |
+
"6(f)": {
|
| 337 |
+
"figure_path": "2408.10153v1_figure_6(f).png",
|
| 338 |
+
"caption": "Figure 7: Depth estimation on additional en face examples.",
|
| 339 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-008_000064_combo.png"
|
| 340 |
+
},
|
| 341 |
+
"6(g)": {
|
| 342 |
+
"figure_path": "2408.10153v1_figure_6(g).png",
|
| 343 |
+
"caption": "Figure 7: Depth estimation on additional en face examples.",
|
| 344 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-012_000013_combo.png"
|
| 345 |
+
},
|
| 346 |
+
"6(h)": {
|
| 347 |
+
"figure_path": "2408.10153v1_figure_6(h).png",
|
| 348 |
+
"caption": "Figure 7: Depth estimation on additional en face examples.",
|
| 349 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-013_000025_combo.png"
|
| 350 |
+
},
|
| 351 |
+
"6(i)": {
|
| 352 |
+
"figure_path": "2408.10153v1_figure_6(i).png",
|
| 353 |
+
"caption": "Figure 7: Depth estimation on additional en face examples.",
|
| 354 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-014_000014_combo.png"
|
| 355 |
+
},
|
| 356 |
+
"6(j)": {
|
| 357 |
+
"figure_path": "2408.10153v1_figure_6(j).png",
|
| 358 |
+
"caption": "Figure 7: Depth estimation on additional en face examples.",
|
| 359 |
+
"url": "http://arxiv.org/html/2408.10153v1/extracted/5800319/figures/depth_demo/enface-014_000071_combo.png"
|
| 360 |
+
}
|
| 361 |
+
},
|
| 362 |
+
"validation": true,
|
| 363 |
+
"references": [],
|
| 364 |
+
"url": "http://arxiv.org/html/2408.10153v1"
|
| 365 |
+
}
|
20240819/2408.10181v1.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
20240819/2408.10285v1.json
ADDED
|
@@ -0,0 +1,242 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "BatGPT-Chem: A Foundation Large Model For Retrosynthesis Prediction",
|
| 3 |
+
"abstract": "Retrosynthesis analysis is pivotal yet challenging in drug discovery and organic chemistry. Despite the proliferation of computational tools over the past decade, AI-based systems often fall short in generalizing across diverse reaction types and exploring alternative synthetic pathways. This paper presents BatGPT-Chem, a large language model with 15 billion parameters, tailored for enhanced retrosynthesis prediction. Integrating chemical tasks via a unified framework of natural language and SMILES notation, this approach synthesizes extensive instructional data from an expansive chemical database. Employing both autoregressive and bidirectional training techniques across over one hundred million instances, BatGPT-Chem captures a broad spectrum of chemical knowledge, enabling precise prediction of reaction conditions and exhibiting strong zero-shot capabilities. Superior to existing AI methods, our model demonstrates significant advancements in generating effective strategies for complex molecules, as validated by stringent benchmark tests. BatGPT-Chem not only boosts the efficiency and creativity of retrosynthetic analysis but also establishes a new standard for computational tools in synthetic design. This development empowers chemists to adeptly address the synthesis of novel compounds, potentially expediting the innovation cycle in drug manufacturing and materials science. We release our trial platform at https://www.batgpt.net/dapp/chem.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Retrosynthesis analysis [1 ###reference_b1###], which aims to identify a set of precursors given a target molecule, is essential in synthetic chemistry. It plays a vital role in applications like drug design, chemical biology, and material science. However, the extensive range of possible chemical transformations and the incomplete understanding of the chemical reaction mechanisms make retrosynthesis planning an extremely challenging task, even for experienced chemists addressing smaller molecular structures.\nOver recent decades, the development of various computer-aided synthesis planning (CASP) methods has emerged to address these challenges [1 ###reference_b1###, 2 ###reference_b2###, 3 ###reference_b3###]. Artificial intelligence (AI)-based methods, specifically in reaction modeling frameworks, combined with the increasing availability of extensive synthetic datasets, has facilitated the advancement of data-driven approaches, notably deep learning (DL) models.\nThese approaches have significantly aided chemists, saving considerable time and effort in designing synthetic experiments.\nData-driven retrosynthesis methodologies can be broadly categorized into three types: template-based, template-free, and semi-template-based. A reaction template is essentially a subgraph pattern that illustrates the changes in atoms and bonds between a product molecule and its reactants. Template-based approaches [4 ###reference_b4###, 5 ###reference_b5###, 6 ###reference_b6###, 7 ###reference_b7###] incorporate fundamental reactive rules that are either manually defined or derived from chemical reaction datasets, enabling them to delineate molecular transformations during reactions. Typically, these algorithms rely on a repository of reaction templates used to align target molecules and transform product molecules into reactants through pertinent templates. Although template-based methods offer high interpretability and produce chemically coherent reactants with perfect validity, their scope and complexity remain constrained by the limited ability to generate reactions beyond the template library.\nIn contrast, both template-free and semi-template methods are independent of an external template database, thereby enhancing model generalization capabilities. Template-free methods [8 ###reference_b8###, 9 ###reference_b9###, 10 ###reference_b10###, 11 ###reference_b11###, 12 ###reference_b12###, 13 ###reference_b13###, 14 ###reference_b14###, 15 ###reference_b15###, 16 ###reference_b16###] treat retrosynthesis as a sequential generation problem, transforming products into potential precursors in an end-to-end fashion. Semi-template approaches [17 ###reference_b17###, 18 ###reference_b18###, 19 ###reference_b19###, 20 ###reference_b20###, 21 ###reference_b21###] partition retrosynthesis into two phases: initially identifying the reaction center, i.e., a focal area in a product, to generate intermediate molecules known as synthons, followed by augmenting these synthons to form precursors. Non-template-based methods typically adopt either sequence-based or graph-based approaches. Sequence-based strategies view retrosynthesis as a neural machine translation task, depicting molecules as linear texts, such as simplified molecular input line entry system (SMILES) strings [22 ###reference_b22###], and utilizing neural language models like Transformer [23 ###reference_b23###] for prediction. 
Graph-based approaches, on the other hand, interpret retrosynthesis through molecular graph structures, deploying models such as directed message passing neural networks to make predictions. Compared to template-based methods, template-free approaches leveraging AI technologies have demonstrated greater potential in both generalization and predictive diversity. This research delves into these methods in particular. While AI-driven methods have made considerable progress in retrosynthesis analysis, they still face certain limitations, as outlined below.\ni) Deficiency in molecular and chemical reaction knowledge. Traditional AI models, constrained by their learning capacities and limited training data, often fail to incorporate comprehensive knowledge from chemical literature. This significantly limits performance enhancements. Although recent studies have applied advanced Large Language Models (LLMs) to chemistry [24 ###reference_b24###, 25 ###reference_b25###, 26 ###reference_b26###] to mitigate this limitation, these models have not been adequately fine-tuned on specific chemical datasets.\nii) Neglect of reaction conditions (in the context of our paper, we use the term \u201creaction conditions\u201d to refer to substances such as solvents and catalysts that do not contribute any atoms to the product; reaction conditions are not considered as reactants in this paper). Current models often exclude substances not directly involved in reactions or arbitrarily mix them up with reactants, due to the lack of reaction conditions in training data and difficulties integrating them. This reduction in interpretability and reliability undermines model performance. Yet, chemical reaction success profoundly depends on precise reaction conditions [27 ###reference_b27###] including catalysts, solvents, and other factors that influence reaction rates, yields, and selectivity. Accurate modeling of these conditions is crucial for effective retrosynthesis planning.\niii) Limited zero-shot prediction capability. Typical AI models are trained and tested on data from the same distribution, rendering them ineffective for out-of-distribution predictions, particularly in zero-shot retrosynthesis tasks. This limitation severely restricts their ability to transfer knowledge across different datasets or chemical reaction classes. The problem is particularly pronounced when attempting retrosynthesis across a diverse chemical space [28 ###reference_b28###].\nTo enhance the accuracy, reliability, and generalization of retrosynthetic models and thereby improve their utility in chemical synthesis, we develop the innovative BatGPT-Chem for one-step retrosynthesis prediction. In this work, we leverage the widely-used SMILES notation as a specialized chemical language, integrating it with natural language through the use of Large Language Models (LLMs). By combining open-source and closed-source datasets into a larger-scale instruction-tuning dataset, utilizing prompt templates, we effectively facilitate instruction tuning for BatGPT-Chem.\nSignificantly, our prompts explicitly include reaction conditions, covering a vast chemical space from various downstream datasets. Building upon our previously developed LLM, BatGPT-15B [29 ###reference_b29###], we expand the model\u2019s vocabulary with specialized chemical terms and further refine it through instruction tuning, culminating in BatGPT-Chem. 
This model surpasses existing large chemical models [30 ###reference_b30###, 31 ###reference_b31###] in terms of model size and uses a more comprehensive bilingual (Chinese and English) instruction-tuning dataset.\nExperimental results validate that BatGPT-Chem successfully incorporates chemical knowledge into retrosynthesis prediction, performs well under zero-shot conditions, and explicitly predicts reaction conditions in an end-to-end manner. This extensive capability sets new benchmarks for the application of LLMs in the field of chemical engineering.\nIn summary, this study demonstrates the potential of integrating specialized chemical language with advanced instruction-tuning techniques via a powerful LLM to enhance the accuracy and robustness of retrosynthesis analysis. By learning from large-scale datasets of chemical literature and symbolic strings, along with mastering an enriched chemical-specific vocabulary, BatGPT-Chem interprets the relationship between chemical text descriptions and molecules, which is evidenced by the model\u2019s capability to predict reaction conditions not included in the reaction routes for training. Such advancements emphasize the profound impact that specialized instructional methodologies can have on the evolution of computational chemistry. Moreover, the deployment of BatGPT-Chem on an online server facilitates chemists\u2019 access to sophisticated suggestions for synthesizing novel compounds, potentially expediting the innovation cycle in drug manufacturing and materials science."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Results",
|
| 15 |
+
"text": ""
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Retrosynthesis prediction benchmark",
|
| 21 |
+
"text": "Given a set of products, the objective is to generate precursors that synthesize the products. Similar to other LLMs, BatGPT-Chem has been trained on extensive data, making it challenging to estimate its zero-shot prediction ability for retrosynthesis planning. To minimize overlap between test and training data and to explore a broader chemical space, we collect and organize eight datasets with various reaction types to establish a new benchmark dataset for retrosynthesis prediction. We take reaction conditions (precursors) into account as much as possible. Details of the benchmark are provided below.\n###figure_1### The Suzuki-Miyaura (SM) dataset [34 ###reference_b34###] contains one product of the Suzuki-Miyaura cross-coupling reactions. There are 5,760 reactions on the combinations of 15 couplings of electrophiles and nucleophiles, 12 ligands (with a blank one), eight bases (with a blank one), and four solvents.\nThe high-throughput experiments Buchwald-Hartwig (HTE BH) dataset [35 ###reference_b35###] contains five products of the Pd-catalyzed Buchwald-Hartwig C-N cross-coupling reactions. There are 3,955 reactions on the combinations of 15 aryl halides, four ligands, three bases, and 23 additives.\nThe electronic laboratory notebooks Buchwald-Hartwig (ELN BH) dataset [28 ###reference_b28###] contains 454 products of the Pd-catalyzed Buchwald-Hartwig C-N cross-coupling reactions. It has 551 reactions with a wider range of reaction space than the HTE BH dataset.\nThe asymmetric allylic alkylation with amine (AAAA) dataset [36 ###reference_b36###] contains 189 products of 273 reactions.\nThe Denmark dataset [37 ###reference_b37###] contains 25 products of the asymmetric -acetal formation using CPA catalysts. There are 1,075 reactions on the combinations of 43 catalysts, five imines, and five thiols.\nThe asymmetric hydrogenation of olefins (AHO) dataset [38 ###reference_b38###] contains 3,147 products. There are 10,268 reactions with 1,686 transition metal catalysts and 2,754 olefin substrates.\nThe metabolites and biochemical reactions (BioChem) dataset [39 ###reference_b39###] comprises 16,838 products specifically curated for the biosynthetic planning of natural products, including a total of 33,687 reactions. It does not contain information about reaction conditions.\nThe USPTO-100 dataset is part of ChemLLMBench [40 ###reference_b40###], containing 100 products (one product for one reaction) randomly sampled from the respective test sets. It does not contain information about reaction conditions.\nWe carefully examine the overlap between the pre-training dataset and the retrosynthesis benchmark. Except for the USPTO-100 dataset, which is a subset of the USPTO dataset with all 100 reactions included in the pre-training dataset, the other datasets have little or no overlap with the pre-training dataset. Without considering reaction conditions, the ELNs BH, AHO, and BioChem datasets contain 3, 77, and 172 reactions from the pre-training dataset, respectively. Considering the reaction conditions, only the BioChem dataset has 53 overlapping reactions with the pre-training dataset. We also check the overlap within the retrosynthesis benchmark itself, finding that only the AAAA and AHO datasets have one and eight reactions from the BioChem dataset, respectively. Given the minimal overlap among the datasets, we believe it is sufficient to estimate the retrosynthesis prediction ability of models under zero-shot conditions without additional processing.\n###figure_2### Fig. 
1 ###reference_### presents annotated reaction graphs created using the DRFP [41 ###reference_b41###] fingerprints, with colors corresponding to the eight data sources. Despite slight fragmentation in the TMAP sub-trees due to the large disparity in reaction numbers from different sources, related reaction types are well grouped. For example, in the lower left, the Suzuki-Miyaura reactions are clustered continuously at the edges of the spherical tree, while the -acetal reactions form disjointed sub-trees in the upper region. The various reaction types span a vast chemical reaction space, posing a significant challenge to the model\u2019s generalization performance. Here we introduce two new metrics, coverage of reactants (coverage) and intersection of reaction conditions (intersection), along with MaxFrag and Validity, to thoroughly evaluate retrosynthetic models.\n\nCoverage of reactants, which indicates whether the true reactant molecules are covered by the model outputs, reflecting the accuracy of the reaction predictions. It differs from the coverage of precursors [14 ###reference_b14###], which concerns whether the model can predict at least one valid precursor suggestion at the reaction level.\n\nIntersection of reaction conditions, which measures whether the model predicts any of the true reaction condition molecules. Directly predicting exact reaction conditions is extremely challenging, leading to common metrics such as Exact Match and Coverage being close to zero for most models. To address this, we propose the intersection accuracy metric, which relaxes the requirements and provides a more attainable measure of model performance when considering reaction conditions.\n\nMaxFrag [42 ###reference_b42###], which assesses the ability to identify the principal transformation for classical retrosynthesis. Predicting the largest fragment, focusing only on the main compound transformation, is the minimal information required for an efficient retrosynthesis route.\n\nValidity, which measures how many of the SMILES codes predicted by the model are legal and do not violate chemical principles.\nWe also conduct multiple experiments by adjusting the Top-k hyperparameter of the model inference to evaluate its performance further. Note that the prediction is correct for products with multiple synthetic pathways as long as the model successfully predicts any of them."
|
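Concretely, all four metrics reduce to set operations over canonicalized SMILES. The sketch below illustrates how coverage, intersection, and MaxFrag could be computed with RDKit; the function names and the assumption that predictions and ground truths arrive as lists of SMILES strings are ours, not the benchmark's released evaluation code.

```python
from rdkit import Chem

def canon_set(smiles_list):
    """Canonicalize SMILES with RDKit; silently drop unparsable strings."""
    mols = (Chem.MolFromSmiles(s) for s in smiles_list)
    return {Chem.MolToSmiles(m) for m in mols if m is not None}

def covers_reactants(pred, true):
    """Coverage: every true reactant molecule appears among the predictions."""
    return canon_set(true) <= canon_set(pred)

def intersects_conditions(pred_cond, true_cond):
    """Intersection: at least one true condition molecule is predicted."""
    return bool(canon_set(pred_cond) & canon_set(true_cond))

def matches_max_frag(pred, true):
    """MaxFrag: the largest true fragment (by heavy-atom count) is predicted."""
    frags = [Chem.MolFromSmiles(s) for s in canon_set(true)]
    largest = max(frags, key=lambda m: m.GetNumHeavyAtoms())
    return Chem.MolToSmiles(largest) in canon_set(pred)
```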
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "BatGPT-Chem achieves exceptional accuracy in identifying reactants",
|
| 27 |
+
"text": "In the MaxFrag score analysis presented in Fig. 2 ###reference_###, BatGPT-Chem achieves state-of-the-art performance across most datasets, except for the ELN BH dataset where it is surpassed by ChemDFM. Falcon and Llama3-8b display significantly weaker performance across all datasets, with Llama3-8b exceeding 10% accuracy only on the HTE BH dataset (achieving 40%). For the complex reaction class of asymmetric -acetal formation from the Denmark dataset, other models fail to identify the largest fragments in any of the 25 reactions, while BatGPT-Chem successfully predicts key fragments in four reactions.\nNotably, in reactions from the AHO dataset, which are relatively simple and primarily involve the addition of hydrogens to reactants to obtain the product, other models exhibit extremely poor performance. In contrast, BatGPT-Chem maintains a high accuracy rate. Among the 10,268 reactions in the AHO dataset, it achieves an accuracy of 56.7%. This accuracy notably exceeds that of ChemDFM by over 30 times, while Falcon and Llama3-8b manage only a 0.4% accuracy rate.\nIn direct synthesis scenarios that require precise identification of all interacting components, we evaluate the predictive performance of models by assessing their ability to cover all reactants in a reaction, i.e., coverage. The results, presented in Table 1 ###reference_###, align with trends seen in the MaxFrag metric, yet with notably lower coverage scores. For instance, BatGPT-Chem\u2019s performance on the ELN BH and USPTO-100 datasets declines from 63.7% to 60.8% and 77.0% to 70.0%, respectively. Coverage score of ChemDFM even drops to 0% on the SM and AAAA datasets. While classical retrosynthesis primarily targets key transformations, the ability to accurately predict additional reactants enhances the completeness of retrosynthetic analyses.\nIn this evaluation, we assess language models (LLMs) on reactant prediction using the MaxFrag and Coverage metrics instead of the traditional exact match Top- accuracy. Many LLMs do not differentiate between reactants and reaction conditions during training, typically using the \u2019.\u2019 notation to segregate molecules within reaction SMILES strings. This convention complicates the automatic extraction of condition components from reaction SMILES, making it difficult to discern reactants from the output strings, and thus potentially rendering the application of the exact match metric unfair."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.3",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "BatGPT-Chem capably predicts reaction conditions explicitly",
|
| 33 |
+
"text": "Beyond the identification of reaction reactants, the prediction of appropriate reaction conditions, like catalysts and solvents, remains a crucial and challenging subsequent task [14 ###reference_b14###].\nDue to the significant complexity involved in accurately and comprehensively predicting reaction conditions, LLMs often struggle to yield reasonable results when assessed using Exact Match and MaxFrag metrics. Therefore, here we employ the \u2018Intersection\u2019 metric, as described in Section 2.1 ###reference_###, to more effectively evaluate the model\u2019s capability in predicting reaction conditions.\nWe assess the accuracy of reaction condition predictions across six datasets which contain reaction condition information. As shown in Figure 3 ###reference_###, BatGPT-Chem outperforms other methods by large margins in terms of Intersection rate.\nThis demonstrates BatGPT-Chem\u2019s exceptional ability to predict reaction conditions and complete retrosynthesis routes effectively. However, it is important to note that on the HTE BH and Denmark datasets, all models consistently fail to present any feasible reaction conditions, highlighting the inherent challenges of this task.\n###figure_3### Moreover, as mentioned in Section 2.2 ###reference_###, while many methods do not distinguish between reactants and reaction conditions within their reaction strings, BatGPT-chem\u2019s training corpus separates these two elements with a \u2018\u2019. This distinction enables BatGPT-chem to explicitly predict reaction conditions through prompting, markedly enhancing its capacity to provide comprehensive retrosynthesis routes.\nTo further illustrate the predictive capabilities concerning reaction conditions, we analyze two specific cases: one from the ELN BH dataset and another from the Denmark dataset, comparing BatGPT-Chem with the top-performing baseline, ChemDFM.\nAs shown in Figure 4 ###reference_###a, BatGPT-Chem successfully predicts most conditions, including the catalyst and metal, only missing one reaction condition. In contrast, ChemDFM fails to generate any correct conditions. In this reaction, the catalyst\u2019s structure is relatively complex, and multiple reaction conditions are required for this retrosynthesis pathway. In cases where reaction conditions are exceedingly complex, it becomes even more challenging for models to make accurate predictions. As can be seen in Figure 4 ###reference_###b, BatGPT-Chem covers all reactants but fails to predict the catalyst, whereas ChemDFM correctly identifies only one reactant. Note that BatGPT-Chem occasionally recommends supplementary simple small molecules among the reactants, which have a negligible influence on the determination of the reaction pathway.\n###figure_4### ###figure_5###"
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "2.4",
|
| 37 |
+
"parent_section_id": "2",
|
| 38 |
+
"section_name": "BatGPT-Chem excels in generating multiple viable retrosynthesis routes",
|
| 39 |
+
"text": "After achieving accurate predictions for reactants and reaction conditions, the focus shifts to evaluating the model\u2019s ability to generate diverse, correct, and even novel predictions. Due to the stochastic nature of LLMs, retrosynthesis predictions sampled from BatGPT-Chem for a fixed product will not be unique. To assess BatGPT-Chem\u2019s capability to propose multiple retrosynthesis routes, we examine four representative products (Fig. 5 ###reference_###a) from the benchmark. We sample the Top-30 predictions for the SM and HTE BH datasets and the Top-10 predictions for the AHO and BioChem datasets.\nFig. 5 ###reference_###b and 5 ###reference_###c display the frequencies and detailed predictions, showcasing BatGPT-Chem\u2019s capacity to provide diverse retrosynthesis routes.\nSpecifically, for the product from the SM dataset, BatGPT-Chem generates seven different predictions, four of which (i to iv) match the ground truths with a frequency ratio of 23/30. The remaining predictions are not in the dataset but still contain valid functional groups. For the product from the HTE BH dataset, BatGPT-Chem provides four different predictions, two of which (i and ii) match the ground truths with a frequency ratio of 26/30. Notably, prediction iii, not included in the original dataset, is found in Reaxys [43 ###reference_b43###, 44 ###reference_b44###] (ID: 39015457), highlighting the model\u2019s capability to predict reactions outside the benchmark dataset.\nFor the product from the AHO dataset, BatGPT-Chem produces three different predictions, two of which (i and ii) match the ground truths with a frequency ratio of 9/10. For this chiral product, BatGPT-Chem successfully predicts all cis-trans isomerism (i and ii) and even provides the SMILES without cis-trans information (iii). For the product from the BioChem dataset, BatGPT-Chem makes four different predictions, two of which (i and ii) match the ground truths with a frequency ratio of 6/10. This is also a chiral product, and BatGPT-Chem accurately predicts different chiral configurations (i) and (ii). Again, BatGPT gives some extra simple small molecules in the reactants for some predictions.\nThese results emphasize BatGPT-Chem\u2019s ability to capture the major backbone of the reactant molecules, provide diverse predictions for retrosynthesis pathways, and predict reactions beyond the benchmark dataset, further demonstrating its robustness and versatility in chemical synthesis planning.\nIn retrosynthesis, the feasibility of synthesizing a single product from various precursors adds complexity to the evaluation of model predictions. For example, in the reaction R-R1+NH3-R-NH2, multiples substituents for R1, such as -OH, -Cl, -Br, -I, or -F are valid, each leading to correct predictions [42 ###reference_b42###].\nThe primary distinction among these options stems from their respective reaction rates and yields.\nWhile the \u2018MaxFrag\u2019 metric does not fully address this issue, it still represents an effort to better manage such data ambiguities during the validation process.\nInstead of devising a new metric, our approach focuses on addressing this challenge from a data-centric perspective to enhance the comprehensiveness of performance evaluation. Specifically, during dataset compilation for model evaluation, we endeavor to gather as many synthetic pathways for the same product as feasible. 
Subsequently, during the assessment phase, a prediction is deemed successful if it aligns with any of the collected retrosynthesis pathways for that product."
|
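The frequency ratios quoted above (e.g., 23/30) follow from canonicalizing each sampled route and counting duplicates. A small illustrative sketch, under our assumption that each sample is a list of valid reactant SMILES:

```python
from collections import Counter
from rdkit import Chem

def route_key(reactant_smiles):
    """Order-independent key for one sampled route: sorted canonical SMILES."""
    canon = sorted(Chem.MolToSmiles(Chem.MolFromSmiles(s))
                   for s in reactant_smiles)
    return ".".join(canon)

def route_frequencies(sampled_routes):
    """Count how often each distinct retrosynthesis route was sampled."""
    return Counter(route_key(route) for route in sampled_routes)
```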
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "2.5",
|
| 43 |
+
"parent_section_id": "2",
|
| 44 |
+
"section_name": "BatGPT-Chem generates outputs with high validity",
|
| 45 |
+
"text": "As LLMs sometimes produce SMILES representations of molecules that may not be valid or chemically plausible, we employ RDKit [45 ###reference_b45###] to verify the validity of molecules generated by these models. BatGPT-Chem consistently achieves high validity rates, nearing or reaching 100% across all datasets, thereby confirming its strong grasp of chemical language. Given that chemical symbols can be considered a specialized language domain, models primarily trained on general natural language corpora often fail to fully comprehend it. BatGPT-Chem can avoid tedious post-processing of grammatical corrections [9 ###reference_b9###] to fix the syntax errors of outputs. Furthermore, it successfully interprets the cis-trans and chiral information inherent in chemical language, as illustrated in Fig. 5 ###reference_###c."
|
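A validity check of this kind is a single RDKit parse per molecule; the following sketch shows one plausible way to compute the rate (our illustration, not the paper's released code):

```python
from rdkit import Chem, RDLogger

RDLogger.DisableLog("rdApp.*")  # suppress parse warnings for invalid SMILES

def validity_rate(predicted_smiles):
    """Fraction of predicted SMILES that RDKit parses into a molecule."""
    if not predicted_smiles:
        return 0.0
    ok = sum(Chem.MolFromSmiles(s) is not None for s in predicted_smiles)
    return ok / len(predicted_smiles)
```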
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Discussion",
|
| 51 |
+
"text": "Large language models (LLMs) have made substantial progress across various fields, demonstrating significant potential to spearhead advancements in AI for Science. Notably, their proficiency in processing sequential data makes them particularly apt for the chemical domain, where common representations such as SMILES also adopt a sequential format. Considering the natural potential of LLMs to learn and predict chemical structures and reactions, we develop BatGPT-Chem, a pioneering large-scale model tailored for retrosynthesis analysis, to address three critical limitations prevalent in existing AI models: i) a deficit in comprehensive molecular and chemical reaction knowledge; ii) oversight of reaction conditions; and iii) inadequate generalization across diverse chemical reactions.\nBatGPT-Chem distinguishes itself by its extensive training corpus, which covers a wide range of chemical literature and chemical string data, i.e., SMILES strings. The implementation of carefully crafted prompt templates and tailored instruction-tuning data during pre-training has significantly enhanced its capacity to decipher both natural and chemical languages. This advancement is reflected in its improved accuracy in predicting reactants and the near-perfect validity of its output. Particularly noteworthy is BatGPT-Chem\u2019s performance on comprehensive benchmark datasets, demonstrating remarkable zero-shot retrosynthesis prediction capacities that hold practical implications for real-world applications.\nA unique aspect of BatGPT-Chem compared to other LLMs is its explicit handling of reaction conditions. By directly extracting reaction conditions from datasets and creating specific prompts to predict them, BatGPT-Chem shows superior ability to elucidate components such as solvents and catalysts.\nWhen predicting reaction conditions, non-generative models can only deal with a fixed set of molecules and are usually modeled as multi-classification or multi-label problems [46 ###reference_b46###, 47 ###reference_b47###, 48 ###reference_b48###, 49 ###reference_b49###], which greatly limits the generalization ability of these models. These methods use a post-processing approach in retrosynthesis, where reaction conditions are predicted after the reactants and products are known. This two-stage processing requires additional training of the model, is not simple enough to use, and makes it more difficult to ensure the stability of the model.\nContrastingly, generative models like BatGPT-Chem can predict a wide variety of reaction condition molecules, are not constrained by a finite set, and can offer novel and heuristic predictions in an end-to-end manner. Since most of the reaction condition information is stored using raw text [50 ###reference_b50###], constructing datasets for machine learning is inherently time-consuming and laborious, including steps such as extracting text, removing errors, and converting to computer-readable sequences [51 ###reference_b51###, 49 ###reference_b49###]. BatGPT-Chem can provide reference reaction conditions for inverse synthesis datasets like BioChem [39 ###reference_b39###], which do not contain information on reaction conditions.\nThis capability can help build more comprehensive and enriched datasets of chemical reactions, facilitating the use of machine learning in chemical reaction modeling.\nMoreover, BatGPT-Chem excels in generating diverse and viable retrosynthesis pathways, providing valuable insights for chemists. 
In fact, cases where multiple paths correspond to the same product are rare in the pre-training datasets; thus, BatGPT-Chem\u2019s ability to suggest various feasible routes can be attributed to its profound understanding of chemical reaction mechanisms. By optimizing beam search strategies and temperature settings, the model adeptly balances diversity and correctness. In contrast, efforts to enhance output diversity in other LLMs through elevated temperature settings often result in an increased error rate.\nIn conclusion, BatGPT-Chem sets new benchmarks for effective and dependable AI-driven retrosynthesis planning. However, the work is still constrained by the quality of the data, which comes primarily from open-access sources. Despite endeavors to enrich reaction condition data and compile comprehensive retrosynthesis pathways, gaps remain. Future improvements will likely require collective efforts from across the scientific community. Another constraint is the scope of chemical languages covered: the model currently focuses on SMILES, and exploring additional string-based representations like SELFIES [52 ###reference_b52###] could broaden its utility, paving the way for its application to a greater spectrum of chemical reactions."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "Methods",
|
| 57 |
+
"text": ""
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.1",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "Unified modeling",
|
| 63 |
+
"text": "Viewing natural language as a specialized language, we can employ LLMs for unified modeling of natural language to SMILES, SMILES to natural language, SMILES to SMILES, and natural language to natural language. This naturally facilitates the completion of various chemistry tasks: Molecule Description, Molecule design, Product Inference, and Retro-synthesis Prediction. Additionally, we have also modeled the Yield Prediction task. We showcase our modeling approach in Fig. 6 ###reference_###. We model molecule description as bidirectional conversions between natural language and SMILES, as well as conversions between natural language. We model molecule design as a conversion from natural language to SMILES. We also model product inference and retro-synthesis prediction as conversions from SMILES to SMILES. Additionally, we have also included a task for yield prediction.\n###figure_6###"
|
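Under this unified view, every task becomes a text-to-text instance: a natural-language instruction with SMILES spliced in, paired with an answer string. The prompt wording below is a hypothetical illustration of the scheme, not the exact templates used for training:

```python
def retro_prompt(product_smiles: str) -> str:
    """Hypothetical retro-synthesis instruction: SMILES in, SMILES out."""
    return (f"Given the product {product_smiles}, predict the reactants and "
            f"reaction conditions required to synthesize it.")

def design_prompt(requirements: str) -> str:
    """Hypothetical molecule-design instruction: natural language to SMILES."""
    return (f"Design a molecule satisfying the following properties: "
            f"{requirements}. Output its SMILES.")

print(retro_prompt("CC(=O)Oc1ccccc1C(=O)O"))  # aspirin as an example target
```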
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.2",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "Chemistry tasks and prompt templates",
|
| 69 |
+
"text": "Following the modeling approach outlined above, we focus on the key tasks in the chemistry domain: Molecule Description, Molecule design, Product Inference, Retro-synthesis Prediction, and Yield Prediction. We construct instruction tuning datasets based on existing chemical, drug, and medicine datasets using prompt templates, to train models capable of addressing these tasks.\nRetro-synthesis Prediction. Retro-synthesis Prediction is a crucial task for chemistry. It involves inferring possible reaction pathways and conditions by given product molecules, thereby reverse-predicting the synthetic route to generate the product. Retro-synthesis prediction enables researchers to explore and discover new organic molecular structures more rapidly, which is essential for fields such as organic synthesis chemistry and drug discovery. We train the model\u2019s retro-synthesis prediction capability using two subtasks: 1) Reactant and catalyst prediction: Given a product, predict the potential catalysts and reactants that may be required. 2) Reactant prediction: Given a product and catalyst, predict the reactants.\nProduct Inference. Product inference aims at predicting the products based on given starting materials and specific reaction conditions, which holds significant importance in fields such as organic synthesis and drug design. We train the model\u2019s product inference capability using two subtasks: 1) Product and catalyst prediction: Given reactants, predict the potential catalysts and products that may be involved. 2) Product prediction: Given products and catalysts, predict the reactants involved.\nMolecule Design. Molecule Design is a field involving the creation of new molecules using theoretical and computational methods to produce molecular structures with specific properties or functionalities. This field plays a crucial role in various domains including chemistry, drug design, and materials science. The aim of molecule design is to systematically generate molecules with desired properties and activities to meet specific application needs. This work fully considers over a hundred molecular properties, such as molecule weight, valence electron count, Balaban J value, BertzCT value, number of heavy atoms, number of NHs or OHs, and number of nitrogen and oxygen atoms. It is hoped that the LLM can take into account researchers\u2019 specific requirements for molecule properties of catalysis, products, and reactants of chemical reactions. To train the model\u2019s molecular design capability, the following three tasks are adopted: 1) Specifying catalyst molecular properties: Given reactants to produce a specific product, the catalyst is required to meet certain properties. 2) Specifying reactant and catalyst molecular properties: Given the desired product to be synthesized, both reactants and catalysts are required to meet certain properties. 3) Specifying reactant, catalyst, and product properties: The model is required to provide a chemical reaction with specified molecular properties for reactants, catalysts, and products.\nMolecule Description. Molecule description refers to using computational models to predict and describe the function, effects, and related properties of a molecule given its name, SMILES, or other representations. We adopt the following eight subtasks to train the model\u2019s molecule description capability. 
We not only utilize chemical data for training to enable the model to fully understand and perceive the correspondence between molecular names in both English and Chinese, molecule descriptions, molecule SMILES, and molecule IUPAC (International Union of Pure and Applied Chemistry chemical nomenclature) names, thus obtaining strong molecular description capabilities, but also incorporate some pharmaceutical data to enhance the model\u2019s ability in the pharmaceutical field: 1) Given the molecular Chinese name, generate the English name and description. 2) Given the molecular English name, generate the Chinese name and description. 3) Given the molecular description, generate the Chinese name and English name of the molecule. 4) Given the molecular description, generate the IUPAC name and SMILES code of the molecule. 5) Given the molecular SMILES code, generate the IUPAC name and SMILES code of the molecule. 6) Given the molecular IUPAC name, generate the SMILES code and description of the molecule. 7) Given the Chinese name of a drug, generate the English name and description. 8) Given the English name of a drug, generate the Chinese name and description. 9) Given the description of a drug, generate the Chinese name and English name.\nYield Prediction. Yield prediction in chemical reactions refers to the estimation, through experimental or computational methods, of the ratio between the actual quantity of products generated in a chemical reaction and the theoretically maximum possible yield. We also train the model by predicting corresponding yields for given chemical reactions."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.3",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "Pre-training data source",
|
| 75 |
+
"text": "We utilize publicly available high-quality datasets in the field of chemistry, as well as close-source datasets within our own team, as the raw datasets. Then, we transform them into instruction tuning datasets using the aforementioned prompt templates."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.3.1",
|
| 79 |
+
"parent_section_id": "4.3",
|
| 80 |
+
"section_name": "4.3.1 Publicly Available Datasets",
|
| 81 |
+
"text": "USPTO [53 ###reference_b53###] USPTO collects reaction data extracted through text mining from United States patents published between 1976 and September 2016.\nCHEBI [54 ###reference_b54###] Chemical Entities of Biological Interest (CHEBI) is a freely available dictionary of molecular entities focused on \u201csmall\u201d chemical compounds. The term \u201cmolecular entity\u201d refers to any constitutionally or isotopically distinct atom, molecule, ion, ion pair, radical, radical ion, complex, conformer, etc., identifiable as a separately distinguishable entity. The molecular entities in question are either products of nature or synthetic products used to intervene in the processes of living organisms.\nCJHIF [50 ###reference_b50###] Chemical Journals with High Impact factors (CJHIF) is a high-quality dataset containing a large number of chemical reaction equations extracted from various chemical journals.\nPubChem [55 ###reference_b55###] PubChem is an open chemistry database at the National Institutes of Health (NIH), which mostly contains small molecules, but also larger molecules such as nucleotides, carbohydrates, lipids, peptides, and chemically-modified macromolecules.\nText2Mol [56 ###reference_b56###] Text2Mol provides a large amount of data containing natural language descriptions of molecules."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "4.3.2",
|
| 85 |
+
"parent_section_id": "4.3",
|
| 86 |
+
"section_name": "4.3.2 Close Source Datasets",
|
| 87 |
+
"text": "Drug Instruction We collect a large number of drug names, drug descriptions, and corresponding molecular formulas from drug instruction to enhance the model\u2019s capabilities in the pharmaceutical domain.\nOrganic Compound Manual We have a large collection of private organic compound manuals, containing information such as organic compound names, compound descriptions, compound SMILES, etc.\nMolecular Formula and Name Reference Table We have collected a large amount of publicly available data on compound names and their corresponding molecular formulas.\nSMILES, IUPAC Names, and Molecular Descriptions Reference Table We have collected data on SMILES, IUPAC names, and their corresponding molecular descriptions."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "4.4",
|
| 91 |
+
"parent_section_id": "4",
|
| 92 |
+
"section_name": "Data Transformation For Instruction Tuning",
|
| 93 |
+
"text": "We extract reaction data into reactant SMILES, catalyst SMILES, product SMILES, and yield data. Then we conduct data augmentation, that is if there are multiple reactants, catalysts, or products, we shuffle the SMILES of these compounds.\nFor retro-synthetic prediction, product inference, and yield inference, we organize reactant SMILES, catalyst SMILES, product SMILES, and yield data according to the prompt templates. For molecule design, we use the RDKit tool to randomly select 1-20 properties from a candidate pool of 172 properties to fill in the prompt templates."
|
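A minimal sketch of the two steps described above: permuting multi-molecule fields for augmentation, and sampling RDKit-computable properties for the molecule-design prompts. RDKit's built-in descriptor list stands in here for the paper's 172-property pool, and the input SMILES is assumed to be parsable:

```python
import random
from rdkit import Chem
from rdkit.Chem import Descriptors

def shuffle_molecules(smiles_field: str) -> str:
    """Augmentation: permute the '.'-separated molecules within one field."""
    parts = smiles_field.split(".")
    random.shuffle(parts)
    return ".".join(parts)

def sample_properties(smiles: str) -> dict:
    """Pick 1-20 random molecular properties to fill a design prompt."""
    mol = Chem.MolFromSmiles(smiles)  # assumed valid here
    chosen = random.sample(Descriptors.descList, random.randint(1, 20))
    return {name: fn(mol) for name, fn in chosen}
```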
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "4.5",
|
| 97 |
+
"parent_section_id": "4",
|
| 98 |
+
"section_name": "Data Details",
|
| 99 |
+
"text": "Table 3 ###reference_### lists the data scale used for each task. We have over a hundred million data entries in total, with an average length exceeding 150 tokens. The total number of tokens trained exceeds 15 billion."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "4.6",
|
| 103 |
+
"parent_section_id": "4",
|
| 104 |
+
"section_name": "Training Details",
|
| 105 |
+
"text": ""
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "4.6.1",
|
| 109 |
+
"parent_section_id": "4.6",
|
| 110 |
+
"section_name": "4.6.1 Base Model",
|
| 111 |
+
"text": "We select our team\u2019s self-developed BatGPT-15B model [29 ###reference_b29###] as the base model for instruction tuning. BatGPT-15B is a large bilingual model for both Chinese and English, pre-trained using bidirectional autoregressive methods, and has demonstrated excellent performance on public benchmarks such as CMMLU [57 ###reference_b57###]."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "4.6.2",
|
| 115 |
+
"parent_section_id": "4.6",
|
| 116 |
+
"section_name": "4.6.2 Vocabulary Expansion",
|
| 117 |
+
"text": "Since the BatGPT-15B model is originally designed for natural language, particularly Chinese and English, it lacks comprehensive coverage of specialized terms in chemistry or SMILES. Consequently, expanding its vocabulary becomes necessary. We employ the Byte Pair Encoding (BPE) algorithm to train a vocabulary using diverse training data, encompassing various forms of molecular SMILES, chemical equation SMILES expressions, molecular names, and more. We also include all chemical element symbols in the augmented vocabulary to empower the model with the potential to handle all chemical elements. Subsequently, we merge this augmented vocabulary with that of BatGPT-15B, ultimately yielding a final vocabulary size of 151851."
|
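A sketch of this expansion step using the Hugging Face tokenizers library; the corpus file, vocabulary size, and truncated element list are placeholders, and the final merge into BatGPT-15B's tokenizer is only indicated, since that code is not released:

```python
from tokenizers import Tokenizer, models, trainers, pre_tokenizers

# Train a BPE vocabulary on SMILES strings, reaction equations, and names.
tok = Tokenizer(models.BPE(unk_token="[UNK]"))
tok.pre_tokenizer = pre_tokenizers.Whitespace()
trainer = trainers.BpeTrainer(vocab_size=8000,            # placeholder size
                              special_tokens=["[UNK]"])
tok.train(["chem_corpus.txt"], trainer)                   # hypothetical file

# Guarantee that every chemical element symbol is a token of its own.
ELEMENTS = ["H", "He", "Li", "Be", "B", "C", "N", "O", "F", "Ne"]  # ... up to 118
tok.add_tokens(ELEMENTS)

# Merging these tokens into BatGPT-15B's vocabulary (deduplicating overlaps)
# is what yields the final size of 151,851 entries.
chem_vocab = set(tok.get_vocab())
```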
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "4.6.3",
|
| 121 |
+
"parent_section_id": "4.6",
|
| 122 |
+
"section_name": "4.6.3 Training Settings",
|
| 123 |
+
"text": "We train our model using the deepspeed zero2 strategy on an Nvidia A800 GPU cluster. We set the maximum length to 2048, the batch size per GPU to 8, utilize the AdamW optimizer with a learning rate of 2e-4, and employ the cosine learning rate schedule strategy. We enable gradient checkpointing and set max gradient normalization to 1.0 and weight decay to 0.1."
|
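These hyperparameters map directly onto standard PyTorch/Transformers calls. A sketch of the inner loop under the stated settings, assuming `model` is a Hugging Face causal LM and `dataloader` yields tokenized batches of at most 2048 tokens; the warmup and total step counts are placeholders, and in practice `deepspeed.initialize` with a ZeRO-2 config would wrap this loop:

```python
import torch
from transformers import get_cosine_schedule_with_warmup

optimizer = torch.optim.AdamW(model.parameters(), lr=2e-4, weight_decay=0.1)
scheduler = get_cosine_schedule_with_warmup(
    optimizer, num_warmup_steps=1_000, num_training_steps=100_000)  # placeholders

model.gradient_checkpointing_enable()  # recompute activations to save memory

for batch in dataloader:  # per-GPU batch size of 8
    loss = model(**batch).loss
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()
```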
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "5",
|
| 127 |
+
"parent_section_id": null,
|
| 128 |
+
"section_name": "Data and Code Availability",
|
| 129 |
+
"text": "Working in progress."
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"section_id": "6",
|
| 133 |
+
"parent_section_id": null,
|
| 134 |
+
"section_name": "Acknowledgements",
|
| 135 |
+
"text": "Working in progress."
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"section_id": "7",
|
| 139 |
+
"parent_section_id": null,
|
| 140 |
+
"section_name": "Authors\u2019 Contributions",
|
| 141 |
+
"text": "Working in progress."
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"section_id": "8",
|
| 145 |
+
"parent_section_id": null,
|
| 146 |
+
"section_name": "Competing Interests",
|
| 147 |
+
"text": "The authors declare that they have no competing interests."
|
| 148 |
+
}
|
| 149 |
+
],
|
| 150 |
+
"appendix": [
|
| 151 |
+
{
|
| 152 |
+
"section_id": "Appendix 1",
|
| 153 |
+
"parent_section_id": null,
|
| 154 |
+
"section_name": "Appendix A Case Study",
|
| 155 |
+
"text": "###figure_7### ###figure_8### ###figure_9### ###figure_10### ###figure_11### ###figure_12### ###figure_13###"
|
| 156 |
+
}
|
| 157 |
+
],
|
| 158 |
+
"tables": {
|
| 159 |
+
"1": {
|
| 160 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S2.T1\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S2.T1.10\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S2.T1.10.11.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt ltx_border_t\" id=\"S2.T1.10.11.1.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.10.11.1.1.1\" style=\"font-size:70%;\">Dataset</span></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt ltx_border_t\" id=\"S2.T1.10.11.1.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.10.11.1.2.1\" style=\"font-size:70%;\">Falcon</span></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt ltx_border_t\" id=\"S2.T1.10.11.1.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.10.11.1.3.1\" style=\"font-size:70%;\">Llama3-8b</span></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt ltx_border_t\" id=\"S2.T1.10.11.1.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.10.11.1.4.1\" style=\"font-size:70%;\">ChemDFM</span></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt ltx_border_t\" id=\"S2.T1.10.11.1.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.10.11.1.5.1\" style=\"font-size:70%;\">BatGPT-Chem</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S2.T1.2.2\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.2.2.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.2.2.3.1\" style=\"font-size:70%;\">(a) SM</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S2.T1.2.2.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.2.2.4.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S2.T1.2.2.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.2.2.5.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S2.T1.1.1.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S2.T1.2.2.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.4.4\">\n<td class=\"ltx_td ltx_align_left\" id=\"S2.T1.4.4.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.4.4.3.1\" style=\"font-size:70%;\">(b) HTE BH</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.4.4.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.4.4.4.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.4.4.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.4.4.5.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.3.3.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.4.4.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.5.5\">\n<td class=\"ltx_td ltx_align_left\" id=\"S2.T1.5.5.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.5.5.2.1\" style=\"font-size:70%;\">(c) 
ELN BH</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.5.5.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.5.5.3.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.5.5.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.5.5.4.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.5.5.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.5.5.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.5.5.5.1\" style=\"font-size:70%;\">60.8</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.6.6\">\n<td class=\"ltx_td ltx_align_left\" id=\"S2.T1.6.6.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.6.6.2.1\" style=\"font-size:70%;\">(d) AAAA</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.6.6.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.6.6.3.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.6.6.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.6.6.4.1\" style=\"font-size:70%;\">0.5</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.6.6.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.6.6.5.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.6.6.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.7.7\">\n<td class=\"ltx_td ltx_align_left\" id=\"S2.T1.7.7.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.7.7.2.1\" style=\"font-size:70%;\">(e) Denmark</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.7.7.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.7.7.3.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.7.7.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.7.7.4.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.7.7.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.7.7.5.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.7.7.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.8.8\">\n<td class=\"ltx_td ltx_align_left\" id=\"S2.T1.8.8.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.8.8.2.1\" style=\"font-size:70%;\">(f) AHO</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.8.8.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.8.8.3.1\" style=\"font-size:70%;\">0.4</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.8.8.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.8.8.4.1\" style=\"font-size:70%;\">0.4</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.8.8.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.8.8.5.1\" style=\"font-size:70%;\">1.8</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.8.8.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr 
class=\"ltx_tr\" id=\"S2.T1.9.9\">\n<td class=\"ltx_td ltx_align_left\" id=\"S2.T1.9.9.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.9.9.2.1\" style=\"font-size:70%;\">(g) BioChem</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.9.9.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.9.9.3.1\" style=\"font-size:70%;\">2.7</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.9.9.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.9.9.4.1\" style=\"font-size:70%;\">3.4</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.9.9.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.9.9.5.1\" style=\"font-size:70%;\">17.8</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T1.9.9.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.10.10\">\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S2.T1.10.10.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.10.10.2.1\" style=\"font-size:70%;\">(h) USPTO-100</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S2.T1.10.10.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.10.10.3.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S2.T1.10.10.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.10.10.4.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S2.T1.10.10.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T1.10.10.5.1\" style=\"font-size:70%;\">65.0</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S2.T1.10.10.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\" style=\"font-size:70%;\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S2.T1.14.1.1\" style=\"font-size:129%;\">Table 1</span>: </span><span class=\"ltx_text\" id=\"S2.T1.15.2\" style=\"font-size:129%;\">Top-10 Coverage of reactions for zero-shot retrosynthesis prediction benchmark.</span></figcaption>\n</figure>",
"capture": "Table 1: Top-10 Coverage of reactions for zero-shot retrosynthesis prediction benchmark."
},
"2": {
"table_html": "<figure class=\"ltx_table\" id=\"S2.T2\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S2.T2.10\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S2.T2.10.11.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_tt ltx_border_t\" id=\"S2.T2.10.11.1.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.10.11.1.1.1\" style=\"font-size:70%;\">Model</span></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt ltx_border_t\" id=\"S2.T2.10.11.1.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.10.11.1.2.1\" style=\"font-size:70%;\">Falcon</span></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt ltx_border_t\" id=\"S2.T2.10.11.1.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.10.11.1.3.1\" style=\"font-size:70%;\">Llama3-8b</span></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt ltx_border_t\" id=\"S2.T2.10.11.1.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.10.11.1.4.1\" style=\"font-size:70%;\">ChemDFM</span></th>\n<th class=\"ltx_td ltx_align_right ltx_th ltx_th_column ltx_border_tt ltx_border_t\" id=\"S2.T2.10.11.1.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.10.11.1.5.1\" style=\"font-size:70%;\">BatGPT-Chem</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S2.T2.2.2\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.2.2.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.2.2.3.1\" style=\"font-size:70%;\">(a) SM dataset</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S2.T2.2.2.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.2.2.4.1\" style=\"font-size:70%;\">0.0</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S2.T2.2.2.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.2.2.5.1\" style=\"font-size:70%;\">40.0</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S2.T2.1.1.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right ltx_border_t\" id=\"S2.T2.2.2.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.4.4\">\n<td class=\"ltx_td ltx_align_left\" id=\"S2.T2.4.4.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.4.4.3.1\" style=\"font-size:70%;\">(b) HTE BH dataset</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.4.4.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.4.4.4.1\" style=\"font-size:70%;\">40.0</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.4.4.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.4.4.5.1\" style=\"font-size:70%;\">76.0</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.3.3.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.4.4.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.5.5\">\n<td class=\"ltx_td ltx_align_left\" id=\"S2.T2.5.5.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.5.5.2.1\" 
style=\"font-size:70%;\">(c) ELN BH dataset</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.5.5.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.5.5.3.1\" style=\"font-size:70%;\">45.9</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.5.5.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.5.5.4.1\" style=\"font-size:70%;\">77.5</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.5.5.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.5.5.5.1\" style=\"font-size:70%;\">99.4</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.5.5.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.6.6\">\n<td class=\"ltx_td ltx_align_left\" id=\"S2.T2.6.6.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.6.6.2.1\" style=\"font-size:70%;\">(d) AAA dataset</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.6.6.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.6.6.3.1\" style=\"font-size:70%;\">86.5</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.6.6.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.6.6.4.1\" style=\"font-size:70%;\">85.1</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.6.6.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.6.6.5.1\" style=\"font-size:70%;\">99.1</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.6.6.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.7.7\">\n<td class=\"ltx_td ltx_align_left\" id=\"S2.T2.7.7.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.7.7.2.1\" style=\"font-size:70%;\">(e) Denmark dataset</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.7.7.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.7.7.3.1\" style=\"font-size:70%;\">9.6</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.7.7.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.7.7.4.1\" style=\"font-size:70%;\">88.8</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.7.7.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.7.7.5.1\" style=\"font-size:70%;\">99.2</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.7.7.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.8.8\">\n<td class=\"ltx_td ltx_align_left\" id=\"S2.T2.8.8.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.8.8.2.1\" style=\"font-size:70%;\">(f) AHO dataset</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.8.8.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.8.8.3.1\" style=\"font-size:70%;\">69.1</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.8.8.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.8.8.4.1\" style=\"font-size:70%;\">84.1</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.8.8.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.8.8.5.1\" style=\"font-size:70%;\">99.6</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.8.8.1\" 
style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.9.9\">\n<td class=\"ltx_td ltx_align_left\" id=\"S2.T2.9.9.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.9.9.2.1\" style=\"font-size:70%;\">(g) BioChem dataset</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.9.9.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.9.9.3.1\" style=\"font-size:70%;\">48.7</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.9.9.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.9.9.4.1\" style=\"font-size:70%;\">74.7</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.9.9.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.9.9.5.1\" style=\"font-size:70%;\">95.1</span></td>\n<td class=\"ltx_td ltx_align_right\" id=\"S2.T2.9.9.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.10.10\">\n<td class=\"ltx_td ltx_align_left ltx_border_bb\" id=\"S2.T2.10.10.2\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.10.10.2.1\" style=\"font-size:70%;\">(h) USPTO-100 dataset</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S2.T2.10.10.3\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.10.10.3.1\" style=\"font-size:70%;\">35.1</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S2.T2.10.10.4\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.10.10.4.1\" style=\"font-size:70%;\">75.3</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S2.T2.10.10.5\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"><span class=\"ltx_text\" id=\"S2.T2.10.10.5.1\" style=\"font-size:70%;\">99.3</span></td>\n<td class=\"ltx_td ltx_align_right ltx_border_bb\" id=\"S2.T2.10.10.1\" style=\"padding-left:9.0pt;padding-right:9.0pt;\"></td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\" style=\"font-size:70%;\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S2.T2.14.1.1\" style=\"font-size:129%;\">Table 2</span>: </span><span class=\"ltx_text\" id=\"S2.T2.15.2\" style=\"font-size:129%;\">Top-10 Validity of reactions for zero-shot retrosynthesis prediction benchmark.</span></figcaption>\n</figure>",
"capture": "Table 2: Top-10 Validity of reactions for zero-shot retrosynthesis prediction benchmark."
},
"3": {
"table_html": "<figure class=\"ltx_table\" id=\"S4.T3\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T3.2\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T3.2.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_tt ltx_border_t\" id=\"S4.T3.2.1.1.1\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.2.1.1.1.1\" style=\"font-size:70%;\">Task</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt ltx_border_t\" id=\"S4.T3.2.1.1.2\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.1.1.2.1\" style=\"font-size:70%;\">Amount</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T3.2.2.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T3.2.2.1.1\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.2.1.1.1\" style=\"font-size:70%;\">Retro-synthesis Prediction</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.2.2.1.2\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.2.1.2.1\" style=\"font-size:70%;\">30114006</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.2.3.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T3.2.3.2.1\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.3.2.1.1\" style=\"font-size:70%;\">Product Inference</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.2.3.2.2\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.3.2.2.1\" style=\"font-size:70%;\">30114006</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.2.4.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T3.2.4.3.1\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.4.3.1.1\" style=\"font-size:70%;\">Molecule Design</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.2.4.3.2\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.4.3.2.1\" style=\"font-size:70%;\">40695857</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.2.5.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T3.2.5.4.1\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.5.4.1.1\" style=\"font-size:70%;\">Molecule Description</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.2.5.4.2\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.5.4.2.1\" style=\"font-size:70%;\">210469</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.2.6.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T3.2.6.5.1\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.6.5.1.1\" style=\"font-size:70%;\">Yield Prediction</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.2.6.5.2\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.6.5.2.1\" style=\"font-size:70%;\">10775991</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.2.7.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb ltx_border_b ltx_border_t\" id=\"S4.T3.2.7.6.1\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.7.6.1.1\" 
style=\"font-size:70%;\">Total</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_b ltx_border_t\" id=\"S4.T3.2.7.6.2\" style=\"padding-left:12.0pt;padding-right:12.0pt;\"><span class=\"ltx_text\" id=\"S4.T3.2.7.6.2.1\" style=\"font-size:70%;\">111910329</span></td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\" style=\"font-size:70%;\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S4.T3.5.1.1\" style=\"font-size:129%;\">Table 3</span>: </span><span class=\"ltx_text\" id=\"S4.T3.6.2\" style=\"font-size:129%;\">Data details.</span></figcaption>\n</figure>",
"capture": "Table 3: Data details."
}
},
"image_paths": {
"1": {
"figure_path": "2408.10285v1_figure_1.png",
"caption": "Figure 1: The annotated reaction graphs. The different fingerprints of reactions are visualized using a TMAP algorithm [32] and the Faerun visualization library [33].",
"url": "http://arxiv.org/html/2408.10285v1/extracted/5799224/figs/tmap/reaction_all.png"
},
"2": {
"figure_path": "2408.10285v1_figure_2.png",
"caption": "Figure 2: Top-10 MaxFrag accuracy of prediction of different datasets.",
"url": "http://arxiv.org/html/2408.10285v1/extracted/5799224/figs/maxfrag.png"
},
"3": {
"figure_path": "2408.10285v1_figure_3.png",
"caption": "Figure 3: Top-10 Intersection accuracy of prediction of different datasets.",
"url": "http://arxiv.org/html/2408.10285v1/x1.png"
},
"4": {
"figure_path": "2408.10285v1_figure_4.png",
"caption": "Figure 4: Comparison of predictions between BatGPT-Chem and ChemDFM where products are displayed in pink blocks, reactants are in green blocks, and reaction conditions are in yellow blocks. \ud835\udc1a\ud835\udc1a\\mathbf{a}bold_a An example from the ELN BH dataset. \ud835\udc1b\ud835\udc1b\\mathbf{b}bold_b An example from the Denmark dataset.",
"url": "http://arxiv.org/html/2408.10285v1/x2.png"
},
"5": {
"figure_path": "2408.10285v1_figure_5.png",
"caption": "Figure 5: Analysis of predictions generated by BatGPT-Chem. \ud835\udc1a\ud835\udc1a\\mathbf{a}bold_a Products sampled from the SM, the HTE BH, the AHO, and the BioChem dataset, respectively. \ud835\udc1b\ud835\udc1b\\mathbf{b}bold_b Numbers of prediction within Top-k\ud835\udc58kitalic_k. \ud835\udc1c\ud835\udc1c\\mathbf{c}bold_c Details of predictions where green means ground truth is covered and red means not.",
"url": "http://arxiv.org/html/2408.10285v1/x3.png"
},
"6": {
"figure_path": "2408.10285v1_figure_6.png",
"caption": "Figure 6: The illustration of our unified modeling between natural language and SMILES.",
"url": "http://arxiv.org/html/2408.10285v1/x4.png"
},
"7": {
"figure_path": "2408.10285v1_figure_7.png",
"caption": "Figure 7: Case 1 from USPTO-50k. Give the product CNc1nc(Cl)ncc1[N+](=O)[O-], the model successfully predicts the correct reactant CN.O=[N+]([O-])c1cnc(Cl)nc1Cl. It also simultaneously provides a potential catalyst C1COCC1, which is a commonly used catalyst.",
"url": "http://arxiv.org/html/2408.10285v1/extracted/5799224/figs/case/USPTO_0.png"
},
"8": {
"figure_path": "2408.10285v1_figure_8.png",
"caption": "Figure 8: Case 2 from USPTO-50k. Give the product CCOC(=O)c1cnc(N)c2c(COc3cc(-c4nnc(-c5ccc(Cl)cc5)o4)ccc3C)csc12, the model successfully predicts the correct reactant N.Clc1c2c(scc2COc2c(C)ccc(-c3nnc(-c4ccc(Cl)cc4)o3)c2)c(C(OCC)=O)cn1. The model also predicts a catalyst C(C)(O)C, which could act as a solvent.",
"url": "http://arxiv.org/html/2408.10285v1/extracted/5799224/figs/case/USPTO_2.png"
},
"9": {
"figure_path": "2408.10285v1_figure_9.png",
"caption": "Figure 9: Case 3 from BioChem.\nGive the product CNCC1Cc2cc(-c3ccccc3)cc(-c3ccccc3Cl)c2O1, the model predicts the correct reactant CN.Cc1ccc(S(=O)(=O)OCC2Cc3cc(-c4ccccc4)cc(-c4ccccc4Cl)c3O2)cc1. A catalyst S(C)(=O)C, which bears a resemblance to the reactant structure, is predicted, possibly serving as a \u201dreaction fragment\u201d or an \u201dintermediate product.\u201d",
"url": "http://arxiv.org/html/2408.10285v1/extracted/5799224/figs/case/BioChem_0.png"
},
"10": {
"figure_path": "2408.10285v1_figure_10.png",
"caption": "Figure 10: Case 4 from BioChem. For the product COc1ccc([C@@H]2Sc3cc(C)ccc3N(CCN(C)Cc3ccccc3)C(=O)[C@@H]2OC(C)=O)cc1, the model successfully predicts the reactant \nCC(=O)OC(C)=O.c12ccc(C)cc1S[C@@H](c1ccc(OC)cc1)[C@@H](O)C(=O)N2CCN(C)Cc1ccccc1 and provides a catalyst c1cccnc1, which could be a solvent.",
"url": "http://arxiv.org/html/2408.10285v1/extracted/5799224/figs/case/BioChem_1.png"
},
"11": {
"figure_path": "2408.10285v1_figure_11.png",
"caption": "Figure 11: Case 5 from BioChem. For the product CCc1nc2ccccc2c(=O)n1CCCl, the model predicts the correct reactants O=S(Cl)Cl.c12ccccc1nc(CC)n(CCO)c2=O, and provides a catalyst ClC(Cl)Cl, which could be a solvent.",
"url": "http://arxiv.org/html/2408.10285v1/extracted/5799224/figs/case/BioChem_2.png"
},
"12": {
"figure_path": "2408.10285v1_figure_12.png",
"caption": "Figure 12: Case 6 from our private benchmark AAAA. For the product C=C[C@H](c1ccccc1)n1cnc2ccccc21, the model predicts a reactant C=C.c12ccccc1[nH]cn2.O=C(OC/C=C/c1ccccc1)OC with an additional small molecule ethylene, and successfully predicts furan as the solvent. The solvent information appears only in the original paper of this reaction, demonstrating that the model successfully transfers knowledge from the literature to retro-synthesis prediction tasks after being trained on a large dataset.",
"url": "http://arxiv.org/html/2408.10285v1/extracted/5799224/figs/case/AAA_0.png"
},
"13": {
"figure_path": "2408.10285v1_figure_13.png",
"caption": "Figure 13: Case 7 from our private benchmark AAAA. For the reactant C=C[C@H](c1ccc(Br)cc1)n1cnc2ccccc21, the model correctly predicts the reactant c1nc2ccccc2[nH]1.CBr.C=C.COC(=O)OC\\\\C=C\\\\c1ccc(Br)cc1 but includes an additional ethylene and its corresponding hydrogen halide molecule. The main reason might be the presence of a halogen substituent on the reactant\u2019s ring, which is relatively reasonable. The ligand is not predicted, but furan is successfully predicted as the solvent.",
"url": "http://arxiv.org/html/2408.10285v1/extracted/5799224/figs/case/AAA_1.png"
}
},
"validation": true,
"references": [],
"url": "http://arxiv.org/html/2408.10285v1"
}
20240819/2408.10351v1.json
ADDED
@@ -0,0 +1,93 @@
{
"title": "The Psychological Impacts of Algorithmic and AI-Driven Social Media on Teenagers: A Call to Action",
"abstract": "This study investigates the meta-issues surrounding social media, which, while theoretically designed to enhance social interactions and improve our social lives by facilitating the sharing of personal experiences and life events, often results in adverse psychological impacts. Our investigation reveals a paradoxical outcome: rather than fostering closer relationships and improving social lives, the algorithms and structures that underlie social media platforms inadvertently contribute to a profound psychological impact on individuals, influencing them in unforeseen ways. This phenomenon is particularly pronounced among teenagers, who are disproportionately affected by curated online personas, peer pressure to present a perfect digital image, and the constant bombardment of notifications and updates that characterize their social media experience. As such, we issue a call to action for policymakers, platform developers, and educators to prioritize the well-being of teenagers in the digital age and work towards creating secure and safe social media platforms that protect the young from harm, online harassment, and exploitation.",
"sections": [
{
"section_id": "1",
"parent_section_id": null,
"section_name": "Introduction",
"text": "Social media is a persuasive technology, a concept introduced by B.J. Fogg [fogg1998] in the late 1990s. Persuasive technologies are designed to interactively influence people\u2019s attitudes or behaviors. Social media platforms use specialized algorithms, AI models, and user interfaces designed to capture and retain user attention, using techniques such as personalized recommendation content feeds, notifications, and interactive designs. Such methods aim to maximize user engagement, often leading to prolonged and repetitive use.\nOn January 31, 2024, the United States Senate Judiciary Committee summoned social media companies\u2019 Chief Executive Officers (CEOs), including Meta, TikTok, X, Discord, and Snapchat, for failure to protect children from harm and child sexual abuse on social media platforms [senatejudiciary]. Several parents of children, including those whose children died due to online extortion, sexual abuse, and exposure to harmful content on social media platforms, attended the hearing [wsj].\nThe current social media landscape prioritizes sensationalism and engagement over factual accuracy. It creates a culture where individuals feel compelled to curate a polished, relatable, and entertaining online persona while navigating the treacherous waters of public judgment. The result is a culture of superficial connections and shallow thinking, where the art of deep conversation and critical thinking gradually erodes.\nThe pervasive use of social media by children raises significant concerns about its impact on their mental and physical health. Research indicates that young users are particularly susceptible to the persuasive elements of social media [pew, pulsesurvey1]. Continuous and endless scrolling, facilitated by infinite content feeds, can lead to excessive screen time, which has been associated with a range of adverse psychological outcomes [OASH]. These risks highlight the importance of continuous research, discussions, and efforts by the research community, technology industry, and governments to protect children from social media\u2019s psychological impacts, harm, and abuse.\nThis paper explores the challenges associated with social media use by children, along with potential solutions, guided by the following research questions:\nWhat factors contribute to the psychological impacts of algorithmic and AI-driven social media on teenagers?\nHow can legislators, social media providers, and educators address these challenges to minimize the impact of social media?\nWhat are the potential benefits and limitations of alternative social media models in addressing these issues?\nThe remainder of the paper is organized as follows. Section II ###reference_### discusses social media\u2019s psychological impact. Section III ###reference_### presents the current meta-issues with social media and content consumption. Section IV ###reference_### details the necessary actions to be taken by governments, educational institutions, and the social media industry, followed by the positive potential of Mastodon in Section V ###reference_###. Section VI ###reference_### outlines actionable recommendations and Section VII ###reference_### concludes the paper."
},
{
"section_id": "2",
"parent_section_id": null,
"section_name": "II Psychological Impacts of Social Media on Teens",
"text": "The widespread use of social media by children and its effect on their mental health has garnered significant attention from the media, researchers, and governments recently. Several studies emphasize the connection between social media usage and psychological problems among children and teenagers [mougharbel2023heavy, keles].\nU.S. Surgeon General Dr. Vivek Murthy issued an advisory in May 2023 highlighting the risk social media poses to children\u2019s mental health and well-being [OASH]. According to a 2023 survey, nearly half of the 1453 surveyed children between the ages of 13 and 17 in the United States used social media apps almost constantly, which doubled compared to 2014-2015 [pew]. The other half reported using social media several times a day. Another survey of 1480 children between 13 and 17, conducted by Boston Children Digital Wellness Lab in 2022, reported that children spent an average of 8.2 hours daily on social media, and 57% felt they use it too much [pulsesurvey1]. In another survey by Boston Children Digital Wellness Lab in 2023 [pulsesurvey2], 38% of the children reported a negative (e.g., uncomfortable, unsafe) experience with social media platforms.\nA systematic review conducted by \\citeauthorkeles [keles] indicated a noteworthy association between extensive social media usage and its impact, including psychological distress, anxiety, and depressive symptoms among teenagers. The review highlighted factors such as the constant need for social validation and exposure to cyberbullying as contributing factors [keles]. The harmful effects of social media can extend to severe outcomes such as suicidal thoughts and behavior. Multiple studies have documented cases where excessive use of social media, screen time, and online victimization have led to tragic outcomes among adolescents [CHEN, kim, senkal, peprah].\nThese findings highlight the immediate need for action and effective strategies to mitigate social media\u2019s negative psychological impacts on children."
},
{
"section_id": "3",
"parent_section_id": null,
"section_name": "III The Rise of Doom Scrolling: A Social Media Phenomenon",
"text": "Doom scrolling refers to the act of mindlessly scrolling through social media, often out of boredom, habit, or anxiety. For many individuals, their smartphones have become a trusted companion, offering a welcome respite from the demands of daily life. This reliance on mobile devices was particularly pronounced during the COVID-19 pandemic, as people sought solace in their phones during extended periods of isolation and remote work [Grosser2022wd, Sharma2022].\nAccording to Pew Research Center findings [Vogels_2023], YouTube is the most frequently used social media platform among teenagers, with an overwhelming majority (77%) using it every day. While TikTok usage is not as widespread, still a significant proportion (58%) of teens engage with this app daily. Instagram and Snapchat are also popular choices for daily use, with roughly half (50% and 51%, respectively) of teens reporting they use these platforms at least once a day. Interestingly, only about one in five teens (19%) say they use Facebook daily.\nShort-form video content has become increasingly popular, with TikTok, Snapchat, and YouTube emerging as the leading platforms for sharing bite-sized videos. Research suggests that prolonged consumption of short-form videos can lead to difficulties in concentration, information retention, and a preference for instant gratification over longer content, ultimately affecting attention span and academic focus [Asif_Saniya_Kazi_2024]. As we mindlessly scroll through social media feeds, the constant influx of short-form videos can create a sense of FOMO (fear of missing out) that drives us to keep watching. But this addiction to instant entertainment comes at a cost, as our brains become conditioned to crave quick hits of dopamine rather than engage in more meaningful activities.\nHumans have a natural limitation when it comes to focused attention. Our brains can only maintain concentration for so long before we begin to lose momentum. This capacity for sustained focus is not set in stone; various factors, such as the complexity of the task, personal interest, motivation levels, and individual experiences, can influence it [Shanmugasundaram_2023]."
},
{
"section_id": "4",
"parent_section_id": null,
"section_name": "IV What Can Be Done?",
"text": "The impact of social media on teenagers\u2019 mental health is a pressing concern that requires immediate attention. A comprehensive approach must be considered to address this issue, involving government regulations, industry-led initiatives, and educational programs."
},
{
"section_id": "4.1",
"parent_section_id": "4",
"section_name": "IV-A Government Regulations",
"text": "Governments and policymakers have obligations to regulate emerging technologies, but they face scrutiny and pushback from the technology industry on their decisions [johnson2023doctrine]. At the same time, technology companies are criticized for their technology designs and their social and political impacts. Technology providers are often called to proactively consider the ramifications of their decisions early in the design process. However, they may not understand or be unwilling to respond to social issues, thus people demand their political leaders take necessary actions [wolff].\nIn the last few years, governments worldwide have actively introduced regulations to protect their citizens, especially children, from social media harm. Regulations and legislation can be critical in protecting children from social media harm. Legislation serves as the foundation for establishing a legal framework and allocating resources to address instances of misconduct. Well-crafted legislation enables governments to influence how social media platforms operate, particularly for the younger demographic [Bacon2023].\nIn the United States, federal and state governments have introduced and enacted various legislation to protect children from social media harm. Table I ###reference_### outlines the recent legislation enacted or introduced at the state and federal levels in addition to other existing legislation, such as the Children\u2019s Online Privacy Protection Act (COPPA) [coppa1998]. Section 230 [section230], an amendment to the Communications Act of 1934 [commact1934] and enacted as part of the Communications Decency Act of 1996, is a significant topic in any discussion about U.S. legislation related to social media. Section 230 grants immunity to social media providers and users, and consists of two key parts. Section 230(c)(1) outlines that service providers or users cannot be considered the publisher of any information provided by other users, and social media service providers or users cannot be held liable for acting in good faith to limit access to certain types of objectionable material. [jacobs2024, zhang2024, crs].\nOn April 30, 2024, Senator Brian Schatz introduced new federal legislation, the \u2018Kids Off Social Media Act\u2019 [UScongress]. This bill prohibits the use of social media by children under 13 and personalized recommendations for those between the ages of 12 and 17. In addition, it requires social media platform companies to terminate social media accounts of ages under 13 and delete their personal data [UScongress, senatec]. The new legislation also includes the Eyes on the Board Act of 2023 [US2023], which instructs schools to restrict and monitor the use of social media for kids on school devices and networks and implement a screen time policy. The Earn IT Act of 2023 [earnitact] establishes a National Commission on Online Child Sexual Exploitation Prevention. This bill limits the liability protections of social media providers concerning claims related to child sexual exploitation. In addition, the Earn IT Act of 2023 changes the reporting requirements for social media providers for child sexual exploitation reports to the National Center for Missing and Exploited Children, including technical facts reports and preserving the content for more than one year. 
The Strengthening Transparency and Obligations to Protect Children Suffering from Abuse and Mistreatment (STOP CSAM) Act of 2023 [stopcasm] mandates child abuse reporting, expands protections for child victims, empowers victims to request removal of child sexual abuse material from tech platforms, and holds tech companies accountable and class action for promoting or facilitating online child sexual exploitation. Additionally, it strengthens CyberTipline reporting requirements and mandates social media companies to submit annual reports on promoting a culture of safety for children on their platforms [stopcasm].\nIn 2024, the Utah government passed two pieces of legislation, the Utah Minor Protection in Social Media Act (S.B. 194) [utah194] and the Utah Social Media Amendments (H.B. 464) [utah464], which will be enacted in October 2024 to protect from social media harms and hold social media companies accountable for any mental health issues due to algorithmically curated social media service.\nSimilarly, New York state passed legislation, Senate Bill S7694 [nysenate], called the Stop Addictive Feeds Exploitation (SAFE) Act, to protect children from addictive feeds, provide a mechanism for parents to control their usage, and require parental consent for notifications between 12 AM and 6 AM. It highlights the importance of protecting our young generation from social media harms. These efforts by the U.S. government also acknowledge the harms and effects of social media on children.\nGovernment regulations are critical tools for a secure online experience and ensuring social media platforms adhere to fundamental safety rules for young and minor users. Regulations and laws can hold social media providers accountable, help increase transparency, and establish data protection measures. However, regulations are just one piece of the puzzle. Regulations must be part of a broader strategy to protect minors in the current technology age, including education, technological advancement, and industry-led and collaborative initiatives.\nCertain regulations propose that age verification needs to be completed to access social media. However, this brings the challenge of personal data security. Social media providers must regulate how and where personally identifiable data is stored and used. This data should not be used for other purposes like data mining, machine learning, or monetization. The government must establish an enforcement body to closely monitor the implementation of legislation, with the power to take action against social media providers. Executives of social media providers need to be held personally accountable for decisions that have caused irreparable harm to society. Finally, if a provider is unable to fulfill their obligations, then the enforcement body should have the authority to initiate a mechanism such as a kill-switch, shutting down the platform nationwide."
},
{
"section_id": "4.2",
"parent_section_id": "4",
"section_name": "IV-B Industry-led Initiatives",
"text": ""
},
{
"section_id": "4.2.1",
"parent_section_id": "4.2",
"section_name": "IV-B1 Content Moderation",
"text": "Social media platforms must prioritize effective content moderation to ensure that online discourse remains respectful and safe for all users. With millions of individuals sharing their thoughts and opinions in real-time, social media enables diverse perspectives but also risks posting views that are considered offensive, harmful, or extreme by many users [GenEco].\nArtificial intelligence (AI) systems are not yet capable of making nuanced judgments about the context, intent, and cultural subtleties required for effective content moderation [duarte2017mixed]. AI algorithms rely on patterns learned from large datasets, which can lead to biases and inaccuracies when applied to real-world scenarios. Moreover, content moderation often requires empathy, creativity, and domain expertise, qualities that are difficult for AI systems to replicate. Human judgment is still essential in cases where the context is unclear or the situation requires understanding the subtleties, such as sarcasm, irony, or cultural references. While AI cannot replace human judgment entirely, it can be instrumental to human content moderators by triaging and prioritizing content based on language or context. For instance, AI algorithms can quickly analyze vast amounts of data to identify potentially problematic content, such as hate speech, violence, or explicit imagery, reducing the workload for human moderators. Additionally, AI-powered tools can detect language patterns, such as profanity, threats, or harassment, and flag them for further review by humans [Gosztonyi2023].\nThe power of human-AI collaboration lies in combining AI\u2019s triaging capabilities with human content moderators\u2019 expertise, creating a more effective and efficient content moderation process. This synergy begins when AI identifies potentially problematic content and flags it for human review, allowing trained moderators to assess flagged content, taking into account context, intent, and cultural nuances. Moderators then work with AI algorithms to make informed decisions, combining their expertise with the AI\u2019s insights [molina2022modertation].\nAI can analyze context clues, like surrounding text, hashtags, or user profiles, to better understand the intent behind a piece of content. This contextual understanding can help human moderators make more informed decisions about whether content violates community guidelines. AI algorithms can prioritize content based on factors like severity, audience reach, or potential harm caused, allowing human moderators to focus on the most critical cases."
},
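The triage-then-review pipeline described in the Content Moderation section above can be made concrete with a short sketch. The following Python example is a minimal, hypothetical illustration, not any platform's real system: every name in it (score_content, ReviewQueue, the 0.3 flagging threshold) is an assumption invented for this sketch. It shows an AI-style scorer flagging risky content and a priority queue surfacing the highest-risk, widest-reach items for human moderators first.

import heapq
from dataclasses import dataclass, field

# Hypothetical stand-in for an ML risk classifier: returns a score in
# [0.0, 1.0] from simple keyword heuristics (illustration only).
FLAG_TERMS = {"hate", "threat", "explicit"}

def score_content(text: str) -> float:
    words = set(text.lower().split())
    return min(1.0, 0.4 * len(words & FLAG_TERMS))

@dataclass(order=True)
class ReviewItem:
    priority: float                     # lower sorts first in the heap
    text: str = field(compare=False)    # excluded from ordering

class ReviewQueue:
    """Highest-risk, widest-reach items surface first for human review."""

    def __init__(self) -> None:
        self._heap: list[ReviewItem] = []

    def triage(self, text: str, audience_reach: int) -> None:
        risk = score_content(text)
        if risk > 0.3:  # only flagged content is routed to moderators
            # negate so the riskiest, most-seen items pop first
            heapq.heappush(self._heap, ReviewItem(-risk * audience_reach, text))

    def next_for_human_review(self) -> str | None:
        return heapq.heappop(self._heap).text if self._heap else None

queue = ReviewQueue()
queue.triage("have a nice day", audience_reach=10)     # not flagged
queue.triage("this is a threat", audience_reach=5000)  # flagged
print(queue.next_for_human_review())  # -> "this is a threat"

In a real deployment the keyword heuristic would be replaced by a trained classifier, but the division of labor stays the one the paper describes: AI narrows the stream and ranks it, humans make the judgment calls.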
{
"section_id": "4.2.2",
"parent_section_id": "4.2",
"section_name": "IV-B2 Data Practices and Digital Responsibility",
"text": "The culprit behind this addiction-fueled chaos is the relentless pursuit of data-driven profits. Social media companies harvest vast amounts of user data, including location information, search queries, browsing history, and even biometric data (e.g., facial recognition), to create targeted advertisements that are tailored to our individual preferences. This data is then sold to third-party advertisers, who use it to craft persuasive messages that can sway our purchasing decisions.\nTeenagers today are growing up in a world where social media is integral to their daily lives. They use social media platforms to connect with friends, family, and influencers. However, data privacy and security concerns increase as they share more about themselves online, raising questions about long-term consequences of their digital footprints.\nThe repeated instances of data and privacy breaches among major organizations, including social media giants, have led to a decline in customer trust and increased concerns about online security and personal data protection [Ayaburi2020]. The consequences of such breaches are far-reaching and devastating, with potential outcomes including identity theft [identity2007], financial losses [poyraz2020cyber], and even emotional distress [Labrecque2021data]. As millions of individuals entrust these platforms with their most intimate details, from passwords to credit card numbers, any lapse in security can have catastrophic repercussions.\nThe digital advertising sector heavily depends on the use of behavioral tracking, which involves monitoring consumers\u2019 online activities to refine targeted advertising efforts [Johnson2020]. Teenagers are particularly vulnerable to targeted advertising, as they may not fully understand how their online behavior is being tracked and used to influence their purchasing decisions [radesky2020digital]. The European Union\u2019s Digital Services Act (DSA) [europarl] marks a positive step towards protecting consumers by prohibiting misleading practices and certain forms of targeted advertising, including those that target children or use sensitive data. Additionally, the DSA aims to prevent \u201cdark patterns\u201d and other deceptive tactics designed to manipulate users\u2019 decisions. The DSA serves as a model for other countries, emphasizing the need for similar regulations to safeguard consumer rights and promote transparency in the digital market."
},
{
"section_id": "4.3",
"parent_section_id": "4",
"section_name": "IV-C Educational Programs",
"text": "The responsibility for shaping responsible digital behavior extends beyond government and industry to include educational institutions, which have a unique opportunity to influence the next generation\u2019s online habits. Educational institutions such as schools can become the first step in helping navigate the complex issue of algorithm-driven social media\u2019s impact on teenagers, making it a valuable resource for those looking to support young people online [Dennen2020].\nDigital citizenship encompasses not just the technical skills needed to engage online but also the ethical and responsible behaviors that come with using digital technology, involving respectful and considerate interactions [Lynn2022]. Educational institutions can start by teaching students how to effectively use technology, including basic computer skills, online safety, and digital etiquette. This foundation is essential for students to navigate the digital world responsibly. Institutions can discuss the importance of online reputations and how to maintain a positive digital footprint through responsible social media use and self-reflection [Buchanan2018]. This helps students understand that their online actions have consequences and encourages them to think critically about their digital presence.\nEducational institutions have the power to incorporate and integrate digital literacy into existing subjects, including health education, social studies, language arts and many more [roblyer2019integrating]. Educators can employ case studies in social studies classes to investigate historical and contemporary technological developments alongside their societal implications. Students can delve into how past and present societies have adapted to new technologies, exploring themes like the evolution of online expression, data protection, and the global economic effects of technological advancements. Schools can employ role-playing exercises that present hypothetical situations, allowing students to practice responding effectively to online conflicts or encountering inappropriate content [LAURICELLA2020103989]. This hands-on approach enables students to develop essential skills in managing digital interactions and making responsible choices."
},
{
"section_id": "5",
"parent_section_id": null,
"section_name": "A Glimmer of Hope: Mastodon\u2019s Promise",
"text": "The rise of Mastodon [mastodon], a decentralized social media platform, offers a glimmer of hope for those seeking a more open and inclusive online space. The decentralized nature of Mastodon allows instances to scale independently, built over ActivityPub which enables different servers to communicate with each other. [MastodonGrat].\nMastodon\u2019s promise is simple yet profound: it allows users to create their own communities (called \u201cinstances\u201d) where they can engage in open discussions without the constraints of corporate algorithms or interests. This decentralized approach encourages diversity, creativity, and individuality, making it an attractive option for those seeking a more authentic online experience. In traditional centralized platforms, moderation is typically managed from a single hub, where a coordinated team of moderators works in sync to oversee content across the entire platform. In contrast, decentralized systems like Mastodon fragment these moderation capabilities and efforts, introducing added complexity. Table II ###reference_### presents a side-by-side analysis of centralized social media platforms versus Mastodon, highlighting their similarities and differences. Each individual instance within the Mastodon network has its own distinct group of administrators and moderators responsible for reviewing and curating content as it flows through its unique community space. [ACM16Mastodon], [MastodonRules]. When an instance is not enforcing its community guidelines or is allowing harmful content to spread, defederation can be used to block communication with that instance. This means that users from other instances won\u2019t be able to interact with the problematic instance, reducing the spread of harmful information and mitigating its impact. [Colglazier_TeBlunthuis_Shaw_2024]\nTeenagers are especially well-positioned to benefit from Mastodon\u2019s community-based approach, which aims to offer a safe environment with responsible moderation, free from corporate algorithms, data collection and tracking. Such models offer young people a chance to develop healthy online habits and cultivate meaningful connections with others by providing a platform that values openness, inclusivity, and creativity.\nMastodon provides many benefits, such as a decentralized platform for free speech and community building. However, it also has downsides, notably the potential to create echo chambers [EchoChamber]. An echo chamber is an environment where people are only exposed to information and perspectives that reinforce their beliefs and biases without being challenged or confronted with opposing viewpoints. When users join a Mastodon instance, they are often drawn to communities that align with their interests, values, or demographics. This can lead to a self-selecting process where like-minded individuals congregate in specific instances, creating an environment where similar perspectives and ideas dominate [HOBOLT_LAWALL_TILLEY_2023]. As Mastodon instances grow, users may curate content by favoriting, reblogging, or commenting on posts that align with their beliefs. This can create a feedback loop where only certain types of content are amplified, making it difficult for opposing viewpoints to gain traction. Mastodon lacks powerful automatic content moderation and is highly dependent on community efforts for self-moderation. 
This means that users are responsible for reporting and addressing any issues with content rather than relying on AI-powered moderators.\nMastodon\u2019s model is a step in the right direction towards creating a healthier, more inclusive and decentralized online ecosystem. Although this approach informs potential improvements to traditional social media platforms, it is not intended as a direct replacement, but rather serves as a complementary platform. By leveraging Mastodon\u2019s strengths and understanding its limitations, we can work towards creating a comprehensive, informed technology solution that addresses the root causes of social media\u2019s problems."
},
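The defederation mechanism described in the Mastodon section lends itself to a similarly small illustration. The Python below is a toy model under stated assumptions: Instance, blocked_domains, and deliver_activity are names invented for this sketch (Mastodon itself is a Ruby on Rails application, and real delivery is a signed HTTP POST to a remote inbox), but the logic mirrors the described behavior, where each instance keeps its own blocklist and ActivityPub delivery is simply skipped for defederated servers.

from dataclasses import dataclass, field

@dataclass
class Instance:
    """Toy model of one Mastodon server and its own moderation policy."""
    domain: str
    blocked_domains: set[str] = field(default_factory=set)

    def defederate(self, remote_domain: str) -> None:
        # Moderators of this instance cut off an entire remote server.
        self.blocked_domains.add(remote_domain)

    def deliver_activity(self, target_domain: str, activity: dict) -> bool:
        # Federation is skipped entirely for defederated servers.
        if target_domain in self.blocked_domains:
            return False  # no interaction: the harmful instance is isolated
        print(f"delivering {activity['type']} to {target_domain}")
        return True

home = Instance("example.social")
home.defederate("harmful.example")
home.deliver_activity("friendly.example", {"type": "Create"})  # delivered
home.deliver_activity("harmful.example", {"type": "Create"})   # silently dropped

Because the blocklist is per-instance, moderation is fragmented exactly as the section describes: each community decides independently which servers to cut off.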
{
"section_id": "6",
"parent_section_id": null,
"section_name": "VI A Call to Action",
"text": "Based on our findings, we propose the following multi-pronged call to action:\nStrengthen Legislation and Enforcement Bodies: Government and social media companies must provide transparent and age-appropriate content guidelines. Social media providers must regulate algorithmic transparency and make regular updates available to the public. Adopting Mastodon\u2019s decentralized approach allows each instance to enforce its own public moderation policies, ensuring users understand content guidelines and promoting open-source algorithm inspection.\nIntegrate Digital Wellness Education: Educational institutions must incorporate digital wellness education into their curriculum, focusing on psychological impacts and enabling students to develop essential skills in managing digital interactions and making responsible choices.\nRegularly Review and Update Policies: Conduct regular data collection and processing audits to ensure compliance with evolving regulations (e.g., GDPR, CCPA).\nPrioritize Teenagers\u2019 Well-being: It is essential that social media companies implement robust measures to prevent the sharing of explicit content, especially among teenagers. This involves not only flagging inappropriate material but also educating users about online safety and responsible behavior.\nMitigate Algorithmic Bias through Transparent Governance: Other social media providers can emulate Mastodon\u2019s transparent governance model to reduce algorithmic bias effectively. This involves enabling open discussions and scrutiny of the decision-making process."
},
{
"section_id": "7",
"parent_section_id": null,
"section_name": "VII Conclusion",
"text": "This paper calls for action from the government, social media providers, and educators to work collaboratively to protect our young generation and solve social media\u2019s psychological impact. Government legislation plays a critical role in establishing the regulatory framework to establish safety rules for social media providers to protect children online. However, the enforcement of the legislation must be established. The phenomenon of doom scrolling exacerbates these mental health issues, creating a cycle of harmful content consumption that is particularly detrimental to younger, impressionable minds. The social media industry must collaborate and take initiatives such as enhanced content moderation and algorithmic adjustments. In addition, social media providers must have children-friendly safety features and bring greater accountability for their platforms.\nFurthermore, exploring alternative platforms, such as Mastodon, offers a glimpse into the potential of decentralized social media models that prioritize user control and privacy. While these platforms have challenges, they represent a valuable direction for future research and development. In summary, addressing the impact of social media on children demands an integrated approach, combining legislative action, industry responsibility, educational initiatives, and continued innovation in social media platforms."
}
],
"appendix": [],
"tables": {
"1": {
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">TABLE I: </span>Recently Introduced or Passed U.S. Social Media Legislation</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.1\">\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.1.1.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.1.1.1.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.1.1.1.1.1\" style=\"width:56.9pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1.1.1.1\">U.S. Legislation</span></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T1.1.1.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.1.1.2.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.1.1.2.1.1\" style=\"width:113.8pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.2.1.1.1\">Purpose</span></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T1.1.1.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.1.1.3.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.1.1.3.1.1\" style=\"width:42.7pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.3.1.1.1\">Status</span></span>\n</span>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.2.1\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.1.2.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.2.1.1.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.2.1.1.1.1\" style=\"width:56.9pt;\">STOP CSAM Act of 2023<cite class=\"ltx_cite ltx_citemacro_cite\">[<span class=\"ltx_ref ltx_missing_citation ltx_ref_self\">stopcasm</span>]</cite></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T1.1.2.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.2.1.2.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.2.1.2.1.1\" style=\"width:113.8pt;\">Protects children from sexual exploitation and promotes accountability and transparency in the technology industry.</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T1.1.2.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.2.1.3.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.2.1.3.1.1\" style=\"width:42.7pt;\">Introduced</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.3.2\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.1.3.2.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.3.2.1.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.3.2.1.1.1\" style=\"width:56.9pt;\">Kids Off Social Media Act<cite class=\"ltx_cite ltx_citemacro_cite\">[<span class=\"ltx_ref ltx_missing_citation ltx_ref_self\">UScongress</span>]</cite></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T1.1.3.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.3.2.2.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.3.2.2.1.1\" style=\"width:113.8pt;\">Social Media providers must verify age, prohibit algorithmic recommendations, require parental consent 
for users under the age of 18, and prohibit social media platform access to children under the age of 13.</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T1.1.3.2.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.3.2.3.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.3.2.3.1.1\" style=\"width:42.7pt;\">Introduced</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.4.3\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.1.4.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.4.3.1.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.4.3.1.1.1\" style=\"width:56.9pt;\">Utah Minor Protection in Social Media Act (S.B. 194) <cite class=\"ltx_cite ltx_citemacro_cite\">[<span class=\"ltx_ref ltx_missing_citation ltx_ref_self\">utah194</span>]</cite></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T1.1.4.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.4.3.2.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.4.3.2.1.1\" style=\"width:113.8pt;\">Social Media providers must verify age, enable maximum privacy settings, provide parental consent mechanisms, and protect minors\u2019 data.</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T1.1.4.3.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.4.3.3.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.4.3.3.1.1\" style=\"width:42.7pt;\">Passed. It will be enacted from October 01, 2024</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.5.4\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.1.5.4.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.5.4.1.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.5.4.1.1.1\" style=\"width:56.9pt;\">Utah Social Media Amendments (H.B.464)<cite class=\"ltx_cite ltx_citemacro_cite\">[<span class=\"ltx_ref ltx_missing_citation ltx_ref_self\">utah464</span>]</cite></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T1.1.5.4.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.5.4.2.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.5.4.2.1.1\" style=\"width:113.8pt;\">Parents can sue social media providers if social media causes mental health issues, limit social media algorithmic content to 3 hours, restrict access between 10:30 PM and 6:30 AM, and require parents\u2019 consent for minors.</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T1.1.5.4.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.5.4.3.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.5.4.3.1.1\" style=\"width:42.7pt;\">Passed. 
It will be enacted from October 01, 2024</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.6.5\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.1.6.5.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.6.5.1.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.6.5.1.1.1\" style=\"width:56.9pt;\">New York Stop Addictive Feeds Exploitation (SAFE) for Kids Act (Senate Bill S7694A) <cite class=\"ltx_cite ltx_citemacro_cite\">[<span class=\"ltx_ref ltx_missing_citation ltx_ref_self\">nysenate</span>]</cite></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T1.1.6.5.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.6.5.2.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.6.5.2.1.1\" style=\"width:113.8pt;\">Prevent addictive feeds and limit the night use of children\u2019s social media accounts.</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T1.1.6.5.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.6.5.3.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.6.5.3.1.1\" style=\"width:42.7pt;\">Passed on June, 2024</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.7.6\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.1.7.6.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.7.6.1.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.7.6.1.1.1\" style=\"width:56.9pt;\">EARN IT Act<cite class=\"ltx_cite ltx_citemacro_cite\">[<span class=\"ltx_ref ltx_missing_citation ltx_ref_self\">earnitact</span>]</cite></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T1.1.7.6.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.7.6.2.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.7.6.2.1.1\" style=\"width:113.8pt;\">Prevention of online sexual exploitation of children, establishing the National Commission on Online Child Sexual Exploitation Prevention, and reporting requirements for social media providers.</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S4.T1.1.7.6.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.7.6.3.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.7.6.3.1.1\" style=\"width:42.7pt;\">Introduced</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.8.7\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S4.T1.1.8.7.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.8.7.1.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.8.7.1.1.1\" style=\"width:56.9pt;\">Eyes on the Board Act<cite class=\"ltx_cite ltx_citemacro_cite\">[<span class=\"ltx_ref ltx_missing_citation ltx_ref_self\">US2023</span>]</cite></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T1.1.8.7.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.8.7.2.1\">\n<span class=\"ltx_p\" id=\"S4.T1.1.8.7.2.1.1\" style=\"width:113.8pt;\">Schools must limit and monitor social media usage in school networks and devices.</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_r ltx_border_t\" id=\"S4.T1.1.8.7.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S4.T1.1.8.7.3.1\">\n<span class=\"ltx_p\" 
id=\"S4.T1.1.8.7.3.1.1\" style=\"width:42.7pt;\">Introduced</span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 82 |
+
"capture": "TABLE I: Recently Introduced or Passed U.S. Social Media Legislation"
|
| 83 |
+
},
|
| 84 |
+
"2": {
|
| 85 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">TABLE II: </span>Mastodon vs. Centralized Social Media: A Comparison of Key Features</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S5.T2.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.1\">\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T2.1.1.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.1.1.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.1.1.1.1.1\" style=\"width:42.7pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.1.1.1.1.1\">Feature</span></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S5.T2.1.1.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.1.1.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.1.1.2.1.1\" style=\"width:85.4pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.1.2.1.1.1\">Mastodon</span></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_align_top ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S5.T2.1.1.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.1.1.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.1.1.3.1.1\" style=\"width:85.4pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S5.T2.1.1.1.3.1.1.1\">Centralized Social Media</span></span>\n</span>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T2.1.2.1\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T2.1.2.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.2.1.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.2.1.1.1.1\" style=\"width:42.7pt;\">Platform Ownership</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S5.T2.1.2.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.2.1.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.2.1.2.1.1\" style=\"width:85.4pt;\">Each instance is owned by its administrators</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S5.T2.1.2.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.2.1.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.2.1.3.1.1\" style=\"width:85.4pt;\">Owned by a single company or organization <cite class=\"ltx_cite ltx_citemacro_cite\">[<span class=\"ltx_ref ltx_missing_citation ltx_ref_self\">LACAVA2022100220</span>]</cite></span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.3.2\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T2.1.3.2.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.3.2.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.3.2.1.1.1\" style=\"width:42.7pt;\">Content Moderation</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S5.T2.1.3.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.3.2.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.3.2.2.1.1\" style=\"width:85.4pt;\">Each instance has its own moderation team and rules</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S5.T2.1.3.2.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.3.2.3.1\">\n<span 
class=\"ltx_p\" id=\"S5.T2.1.3.2.3.1.1\" style=\"width:85.4pt;\">Single moderation team and rules apply across the platform</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.4.3\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T2.1.4.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.4.3.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.4.3.1.1.1\" style=\"width:42.7pt;\">Network Connectivity</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S5.T2.1.4.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.4.3.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.4.3.2.1.1\" style=\"width:85.4pt;\">Instances communicate with each other through open standards (e.g., ActivityPub)<cite class=\"ltx_cite ltx_citemacro_cite\">[<span class=\"ltx_ref ltx_missing_citation ltx_ref_self\">LACAVA2022100220</span>, <span class=\"ltx_ref ltx_missing_citation ltx_ref_self\">ACM16Mastodon</span>]</cite></span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S5.T2.1.4.3.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.4.3.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.4.3.3.1.1\" style=\"width:85.4pt;\">Users connect to a single server or hub</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.5.4\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T2.1.5.4.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.5.4.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.5.4.1.1.1\" style=\"width:42.7pt;\">Scalability</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S5.T2.1.5.4.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.5.4.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.5.4.2.1.1\" style=\"width:85.4pt;\">Scalable through instance federation (multiple instances can communicate with each other)</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_r ltx_border_t\" id=\"S5.T2.1.5.4.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.5.4.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.5.4.3.1.1\" style=\"width:85.4pt;\">Scalable through infrastructure upgrades and load balancing</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.6.5\">\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_l ltx_border_r ltx_border_t\" id=\"S5.T2.1.6.5.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.6.5.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.6.5.1.1.1\" style=\"width:42.7pt;\">Governance</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_r ltx_border_t\" id=\"S5.T2.1.6.5.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.6.5.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.6.5.2.1.1\" style=\"width:85.4pt;\">Each instance has its own community-driven moderation and decision-making process</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_align_top ltx_border_b ltx_border_r ltx_border_t\" id=\"S5.T2.1.6.5.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.6.5.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.6.5.3.1.1\" style=\"width:85.4pt;\">A single entity makes decisions about platform policies and moderation</span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 86 |
+
"capture": "TABLE II: Mastodon vs. Centralized Social Media: A Comparison of Key Features"
|
| 87 |
+
}
|
| 88 |
+
},
|
| 89 |
+
"image_paths": {},
|
| 90 |
+
"validation": true,
|
| 91 |
+
"references": [],
|
| 92 |
+
"url": "http://arxiv.org/html/2408.10351v1"
|
| 93 |
+
}
|
20240819/2408.10383v1.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
20240819/2408.10384v1.json
ADDED
|
@@ -0,0 +1,441 @@
| 1 |
+
{
|
| 2 |
+
"title": "Empirical risk minimization for risk-neutral composite optimal control with applications to bang-bang control1footnote 11footnote 1Funding: This material is based upon work supported by the National Science Foundation under Grant No. DMS-2410944.",
|
| 3 |
+
"abstract": "Nonsmooth composite optimization problems under uncertainty are prevalent in various scientific and engineering applications.\nWe consider risk-neutral composite optimal control problems, where the objective function is the sum of\na potentially nonconvex expectation function and a nonsmooth convex function.\nTo approximate the risk-neutral optimization problems, we use a Monte Carlo sample-based approach,\nstudy its asymptotic consistency, and derive nonasymptotic sample size estimates.\nOur analyses leverage problem structure commonly encountered in PDE-constrained optimization problems, including compact embeddings and growth conditions. We apply our findings to bang-bang-type optimal control problems and propose the use of a conditional gradient method to solve them effectively.\nWe present numerical illustrations.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "In this paper, we consider composite risk-neutral optimization problems of the form\nis a compact linear operator\nbetween the Hilbert space and\nthe Banach space , \nis a smooth integrand, and is a proper, closed, and convex but potentially nonsmooth. Moreover, denotes a random element, which maps to a complete, separable metric\nspace . While ( ###reference_###) covers a variety of challenging settings, the focus of the present work lies on risk-neutral PDE-constrained optimal control problems\nwhere is a tracking-type functional,\nand the choice of the deterministic control influences the behavior of the solution to a PDE with random inputs .\nNonconvex optimization problems governed by differential equations\narise in a multitude of application areas, such as\nsensor placement [2 ###reference_b2###], resource assessment of renewable tidal energy [8 ###reference_b8###],\nand design of groundwater remediation systems [9 ###reference_b9###]. It is well known that the proper choice of the penalty function in Problem 1.1 ###reference_### promotes certain structural features in its minimizers . For example, setting\ndenotes its indicator function, and , tends to provide solutions satisfying a bang-bang-off principle, that is, for a.e. .\nAs a consequence, composite optimal control problems also arise from relaxations of mixed-integer\noptimal control problems [8 ###reference_b8###].\nIn order to handle the expected value in ( ###reference_###) numerically, we approximate it using the\nsample average approximation (SAA) approach [12 ###reference_b12###].\nMore specifically,\ngiven a sequence defined on a complete probability space\n\nof independent -valued random elements\nsuch that each has the same distribution as , we consider the SAA problem\nFor brevity, we often omit the second argument\nof .\nA central topic arising in this context is the question whether this type of approximation is asymptotically consistent and if the proximity of its solutions and critical points towards their counterpart in ( ###reference_###) can be quantified in terms of the sample size .\nAlthough this already represents a formidable problem for smooth objective functions, the potential lack of strong convexity of aggravates the problem further, adding additional challenges to the theoretical analysis of the problem and its efficient solution."
|
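In a finite-dimensional toy setting, the SAA construction described above amounts to replacing the expectation by an empirical mean over i.i.d. samples while keeping the nonsmooth term outside the average. A minimal sketch, assuming a hypothetical quadratic integrand and an L1 penalty as stand-ins for the paper's abstract smooth/nonsmooth pair (not the PDE-constrained setting itself):

```python
import numpy as np

# Minimal SAA sketch (assumptions: finite-dimensional surrogate problem,
# hypothetical integrand J(u, xi) = 0.5*||A(xi) u - b||^2, psi = beta*||u||_1).
rng = np.random.default_rng(0)
m, d, beta = 20, 5, 0.1
b = rng.normal(size=m)

def J(u, xi):
    # smooth integrand for one realization xi of the random element
    A = np.outer(xi, np.ones(d))      # hypothetical random operator A(xi)
    return 0.5 * np.sum((A @ u - b) ** 2)

def saa_objective(u, samples):
    # empirical mean replaces E[J(u, xi)]; the nonsmooth term stays exact
    return np.mean([J(u, xi) for xi in samples]) + beta * np.sum(np.abs(u))

samples = [rng.normal(size=m) for _ in range(1000)]   # N i.i.d. draws of xi
print(saa_objective(np.zeros(d), samples))
```

As N grows, the empirical objective approaches the risk-neutral one; the consistency and rate questions this raises are exactly what the later sections of the paper address.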
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "1.1",
|
| 13 |
+
"parent_section_id": "1",
|
| 14 |
+
"section_name": "Contributions",
|
| 15 |
+
"text": "In the present paper, we investigate the SAA approach for nonsmooth, potentially nonconvex risk-neutral minimization problems of the form ( ###reference_###) from a qualitative and a quantitative perspective. Since Problem ( ###reference_###) is potentially nonconvex, our analysis addresses the behaviour its solutions and its critical points. In the context of the present manuscript, the latter refers to points satisfying the first-order necessary subdifferential inclusion\nor, equivalently, zeros of the gap functional\nCritical points as well as the associated gap function for Problem ( ###reference_###) are defined analogously.\nMore specifically, the contributions of the paper are as follows:\nFor general smooth fidelity terms , we show that the random variables realizing the optimal values, the solutions, and the critical points of Problem ( ###reference_###) asymptotically approximate their deterministic counterparts of Problem ( ###reference_###) with probability one (w.p. ) as the sample size tends to infinity.\nWe establish nonasymptotic expectation bounds for the gap functional value of arbitrary sequences of random vectors. More precisely, we demonstrate that, for every there holds\nwhere , ,\ndenotes the -covering number of the compact set and explicitly depends on the\nradius of the domain of as well as the smoothness of the integrand .\nWhile Problem ( ###reference_###) is typically not strongly convex, we provide nonaymptotic expectation bounds for the approximation of minimizers to ( ###reference_###) by random vectors realizing solutions of Problem ( ###reference_###) if the integrand is convex for\neach ,\nand a growth-type condition on the partial linearization of at a minimizer \nholds true. Specifically, if\nfor some and a Banach space with , then\nwhere depends on\nthe smoothness of the integrand, and .\nHence, the SAA solutions converge in expectation at the usual Monte Carlo rate of while both the value of the gap functional and the suboptimality in exhibit superconvergence at a rate of . This convergence statement is valid for linear bounded operators\n without their compactness.\nOur framework is applied to PDE-constrained optimization problems governed by affine-linear and bilinear elliptic equations, which allow for the use of bang-bang-off regularization terms. Reproducible numerical experiments empirically verify our theoretical results and further highlight the utility of the SAA approach for infinite-dimensional, nonsmooth problems."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "1.2",
|
| 19 |
+
"parent_section_id": "1",
|
| 20 |
+
"section_name": "Related work",
|
| 21 |
+
"text": "Monte Carlo sample-based approximations are common\ndiscretization approaches for risk-neutral and risk-averse huge-scale\noptimization, particularly in PDE-constrained optimization [19 ###reference_b19###], and\nstochastic optimization [34 ###reference_b34###]. The theoretical analyses of this approximation approach\nmay be categorized into asymptotic and nonasymptotic ones.\nIn PDE-constrained\noptimization under uncertainty with strongly convex control regularizers, the\npapers [11 ###reference_b11###, 18 ###reference_b18###, 20 ###reference_b20###, 31 ###reference_b31###] provide\nnonasymptotic analyses, such as sample size estimates and central limit-type\ntheorems. Moreover, [19 ###reference_b19###, 24 ###reference_b24###, 25 ###reference_b25###] provide asymptotic\nconsistency results for nonconvex infinite-dimensional stochastic\noptimization.\nThe problem structure given by the compact linear operator in (4.1 ###reference_###)\nis common among PDE-constrained optimization problems and\nhas been used for different purposes in the literature.\nFor example, the authors of [25 ###reference_b25###] have employed it to demonstrate\nthe consistency of empirical estimators for risk-averse stochastic optimization.\nFinally, [26 ###reference_b26###] establishes nonasymptotic sample size\nestimates for risk-neutral semilinear PDE-constrained optimization. In the field of PDE-constrained\noptimization under uncertainty, current SAA analyzes require certain\ncontrol regularizers, such as standard Tikhonov regularizers\n[11 ###reference_b11###, 21 ###reference_b21###], R-functions [25 ###reference_b25###], and Kadec functions [24 ###reference_b24###]. These requirements exclude\nthe -norm as a control regularizer, for example.\nUsing the problem structure given by the compact linear operator in (4.1 ###reference_###),\nwe are able to extend previous results to general convex control regularizers.\nAs mentioned in the previous section, our sample size estimates for convex problems do not depend on the compactness of the feasible set or of the operator\n. Sample size estimates for stochastic convex optimization without any compactness conditions are also established, for example, in [15 ###reference_b15###, 21 ###reference_b21###, 33 ###reference_b33###]."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "1.3",
|
| 25 |
+
"parent_section_id": "1",
|
| 26 |
+
"section_name": "Outline",
|
| 27 |
+
"text": "Section 2 ###reference_### introduces notation and terminology.\nSection 3 ###reference_### introduces assumptions\non the risk-neutral composite control problems, studies the existence\nof solutions, states gap functional-based optimality conditions, and establishes consistency of SAA optimal values,\nsolutions and critical points. Section 3 ###reference_###\nalso derives sample size estimates for nonconvex and convex\nproblems. Our framework is applied to linear and bilinear PDE-constrained\noptimization in section 4 ###reference_###. Section 5 ###reference_###\npresents numerical illustrations. The appendix derives uniform\nexpectation bounds."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Notation and terminology",
|
| 33 |
+
"text": "Throughout the text, normed vector spaces are defined over the reals. Metric spaces \nare equipped with their Borel sigma-field .\nMoreover, we identify the control space , a separable\nHilbert space, with , and write .\nFor a Banach\nspace with norm , we denote by \nits topological dual space and by its\nduality pairing. If the norm is clear from the context, we write instead\nof . The inner product on a Hilbert space is denoted\nby .\nIf and are Banach spaces with\n, and \ndefined by is linear and bounded, then we say that \nis (continuously) embedded into . We abbreviate such embeddings by\n. A linear operator between two Banach spaces is called\ncompact if its image of the domain space\u2019s unit ball is precompact.\nFor a linear bounded operator \nbetween Banach spaces, \ndenotes its adjoint operator.\nFor a measurable space and\nmetric spaces and , is\ncalled Carath\u00e9odory mapping if is continuous for all\n and is -\nmeasurable for all .\nFor a bounded domain , we denote by\n the standard\nLebesgue spaces,\nand by \nand (\nthe standard Sobolev spaces.\nWe set\n.\nFor a nonempty, totally bounded metric space , the -covering number\n, ,\ndenotes the minimal number of points\nin a -net of ."
|
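Since the epsilon-covering number reappears in the later sample size estimates, a quick way to build intuition (and an upper bound for a finite point cloud) is greedy farthest-point insertion. A minimal sketch, assuming a finite set in Euclidean space; the analytical covering bounds for Sobolev balls cited later in the paper are not reproduced here:

```python
import numpy as np

def greedy_covering_number(points, eps):
    """Upper bound on the eps-covering number of a finite point cloud via
    greedy farthest-point insertion (an illustrative heuristic, not the
    analytical bounds used in the paper)."""
    centers = [points[0]]
    dist = np.linalg.norm(points - points[0], axis=1)
    while dist.max() > eps:
        idx = int(np.argmax(dist))        # farthest point not yet covered
        centers.append(points[idx])
        dist = np.minimum(dist, np.linalg.norm(points - points[idx], axis=1))
    return len(centers)

pts = np.random.default_rng(1).uniform(-1, 1, size=(2000, 2))
print(greedy_covering_number(pts, eps=0.25))
```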
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Risk-neutral composite optimal control",
|
| 39 |
+
"text": "We start by stating the main assumptions on Problem ( ###reference_###).\nFor this purpose, we abbreviate\nthe domain of by\nThe following assumption imposes the properties of and the smoothness and integrability requirements on the integrand.\nThe control space is a separable Hilbert space, and\n is a separable Banach space.\nThe operator\n is linear, and compact.\nThe function \nis proper, closed, convex, and is bounded.\nThe set \nis open, convex, and bounded\nwith .\nMoreover,\n is continuous\nin its first argument\non \nfor each \nand measurable in its second\nargument on for each .\nFor an integrable random variable\n, it holds that\nThe following assumption formulates\ndifferentiablity and integrability statements\nof derivatives.\nFor each , the mapping\nis continuously differentiable.\nThere exists a Carath\u00e9odory function such that\nThere exists an integrable random variable\n such that\nIf is continuously differentiable on a -neighborhood of , the differentiability of and the representation of its gradient are implied by the chain rule. From this perspective,\nAssumption 2 ###reference_umption2### will ensure that the gradients of and admit a composite structure even in the absence of the chain rule. While this might seem technical at first glance, it will allow us to fit challenging settings into the outlined abstract framework. For a particular example, we refer the reader to the bilinear PDE-constrained problem in section 4.2 ###reference_###."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.1",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "Existence of solutions and necessary optimality conditions",
|
| 45 |
+
"text": "In this section, we show that both the risk-neutral problem\n( ###reference_###) and\nthe associated SAA problems\n( ###reference_###) admit solutions.\nWe also show\nthe measurability of the SAA optimal value and the existence of measurable SAA\nsolutions.\nMoreover, we introduce the particular form of first-order necessary optimality conditions used in the following sections.\nFor \nand , we denote by\n the optimal value of the SAA problem\n( ###reference_###).\nLet Assumption 1 ###reference_umption1### hold, and let . Then,\n(i) the risk-neutral problem ( ###reference_###) and\nfor each ,\nthe SAA problem\n( ###reference_###) admit solutions,\n(ii)\nthe function\n\nis measurable, and\n(iii)\nthere exists at least one measurable map\n\nsuch that for each ,\n solves\nthe SAA problem ( ###reference_###).\n(iv)\nIf, moreover,\nLABEL: and 2 ###reference_umption2###\nholds, then the composite functions\n and are continuously differentiable\non with gradients\n and\n,\nrespectively.\n(i)\nSince is weakly compact, the proof follows by standard arguments as well as Fatou\u2019s lemma.\n(ii)\u2013(iii)\nSince\n\nis Carath\u00e9odory\non ,\nmeasurability theorems on\nmarginal maps and inverse images [4 ###reference_b4###, Thms. 8.2.9 and 8.2.11]\nimply the assertions.\n(iv)\nThe assertions can be established\nusing standard arguments.\n\u220e\nPropositions 3.1 ###reference_definition1### and 2 ###reference_umption2###\nmotivate the definitions\nas mappings on and , respectively.\nWe note that these definitions are formal in the sense that\nwhile is well-defined, may not be continuously differentiable, see also the discussion following Assumption 2 ###reference_umption2###.\nAs with , we often omit\nthe second argument of .\nFurthermore, we define the gap functionals\n and\n by\nA point is referred to as critical point\nof ( ###reference_###) if\n.\nSimilarly, a point is called a critical point\nof the SAA problem ( ###reference_###) if\n. Critical points of ( ###reference_###) are the zeros\nof the gap function as\nsummarized in the following proposition.\nIf Assumptions 1 ###reference_umption1### and 2 ###reference_umption2###\nhold, then\n(i)\nan element is a critical point\nof ( ###reference_###) if and only if ,\n(ii)\na point is a critical point\nof ( ###reference_###) if and only if , and\n(iii)\n is weakly lower semicontinuous.\nParts (i) and (ii) can be established using arguments similar to those used\nto prove Theorem 2.4 in [13 ###reference_b13###].\nPart (iii) follows from Theorem 2.5 (c) in [10 ###reference_b10###]\nbecause is completely continuous.\n\u220e"
|
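The gap functional formulas themselves are lost in this extraction, but a standard choice in this composite setting is G(u) = max over feasible v of <grad f(u), u - v> + psi(u) - psi(v), and for psi = beta*||.||_1 plus box constraints [a, b] with a <= 0 <= b the maximizer decouples pointwise into a bang-bang-off selector. A minimal sketch under that assumption (discretized control; the gradient field g and quadrature weights w are hypothetical inputs):

```python
import numpy as np

def gap_functional(u, g, a, b, beta, w):
    """G(u) = max_{a<=v<=b} <g, u - v> + beta*(||u||_1 - ||v||_1) for a
    discretized control u, gradient field g = grad f(u), and quadrature
    weights w (names hypothetical).  With a <= 0 <= b the pointwise
    maximizer is bang-bang-off: v = b where g < -beta, v = a where
    g > beta, and v = 0 otherwise."""
    v = np.where(g < -beta, b, np.where(g > beta, a, 0.0))
    return np.sum(w * (g * (u - v) + beta * (np.abs(u) - np.abs(v))))

# usage on a crude uniform 1D grid with illustrative data
n = 100
w = np.full(n, 1.0 / n)
u = np.zeros(n)
g = np.sin(np.linspace(0.0, 2.0 * np.pi, n))
print(gap_functional(u, g, a=-1.0, b=1.0, beta=0.3, w=w))
```

In this form, u is critical precisely when the returned value vanishes, matching the characterization of critical points as zeros of the gap functional.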
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.2",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "Consistency of SAA optimal values and solutions",
|
| 51 |
+
"text": "We establish the asymptotic consistency\nof SAA optimal values and\nthe weak consistency of SAA solutions.\nWe denote by the optimal value\nof the true problem ( ###reference_###).\nIf Assumption 1 ###reference_umption1### holds, then\n(i)\n as \nw.p. , and\n(ii)\nfor almost all , has\nat least one weak accumulation point in and every\nsuch point is a solution to\n( ###reference_###).\nBefore we establish parts (i) and (ii), let us note that the uniform\nlaw of large numbers [14 ###reference_b14###, Cor. 4:1]\nensures the\nexistence of a null set\n with \nsuch that for all , we have\nMoreover,\n is continuous on owing\nto the dominated convergence theorem.\n(i)\nWe have for all\n,\nCombined with (3.2 ###reference_###), we find that\n as \nfor all .\n(ii)\nFix .\nSince \nis bounded,\nit has a weak accumulation point.\nLet be a weak accumulation point\nof .\nBy assumption, there exists a subsequence\n of\n such that\n as .\nFix . We have\nUsing\nthe convergence statement in\n(3.2 ###reference_###),\nand\n\nas , we obtain\n\nand \nas .\nCombined with the fact that\n is weakly lower semicontinuous,\nSince occurs\nw.p. , we obtain the assertion.\n\u220e"
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.3",
|
| 55 |
+
"parent_section_id": "3",
|
| 56 |
+
"section_name": "Consistency of SAA critical points",
|
| 57 |
+
"text": "We establish the asymptotic consistency of the SAA gap functional\nevaluated as approximate SAA critical\npoints as well as that of\nSAA critical points.\nLet Assumptions 1 ###reference_umption1### and 2 ###reference_umption2###\nhold, and let be a\nsequence with as .\nLet be a sequence of\nrandom vectors with w.p. .\nThen\n(i) as w.p. .\n(ii)\nFor almost all , has at least one weak accumulation point in \nand every such point is a zero of .\nBefore we establish parts (i) and (ii),\nlet us note that for all ,\nHence, we have\nfor all ,\nTherefore,\nthe uniform\nlaw of large numbers [14 ###reference_b14###, Cor. 4:1]\nensures the\nexistence of a null set\n with \nsuch that for all , we have\n(i)\nUsing ,\nthe error bound (3.4 ###reference_###),\nand the above convergence statement, we obtain\n as w.p. .\n(ii)\nFix such that\n as .\nSince \nis bounded, it has a weak accumulation point.\nIf as , then the weak lower semicontinuity of \nestablished in Proposition 3.2 ###reference_definition2###)\nand as imply the assertion.\n\u220e"
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "3.4",
|
| 61 |
+
"parent_section_id": "3",
|
| 62 |
+
"section_name": "Sample size estimates for SAA critical points",
|
| 63 |
+
"text": "We establish expectation bounds and sample size estimates. Our analysis is inspired by that of [34 ###reference_b34###].\nFor this purpose, we formulate a Lipschitz continuity assumption and a light-tailed condition on the integrand\u2019s gradient,\nwhich are a typical assumptions in related contexts.\nFor an integrable random variable\n,\nFor some constant ,\n3 (b) ###reference_i2### is, for example, satisfied\nif there exists a constant such that\n for all .\nWe denote by the diameter\nof .\nLet Assumptions 1 ###reference_umption1###, 2 ###reference_umption2### and 3 ###reference_umption3### hold.\nFor each , let be measurable.\nThen, for all and ,\nUsing (3.3 ###reference_###), we have\nApplying Proposition A.1 ###reference_definition1###\nto , we obtain\n\u220e\nUsing Theorem 3.5 ###reference_definition5###, we establish\nsample size estimates.\nLet the hypotheses of Theorem 3.5 ###reference_definition5###\nhold.\nIf and\nthen\n.\nWe use (3.5 ###reference_###)\nwith .\nWe have . The choice\nof ensures\n\u220e\nThe sample size estimates in Corollary 3.6 ###reference_definition6### are based on\nthe covering numbers of . A typical example in PDE-constrained optimization\nmodels as the adjoint operator of the embedding .\nCombining the covering numbers for Sobolev function\nclasses in [6 ###reference_b6###, Thm. 1.7] and duality of metric entropy [3 ###reference_b3###, p. 1315],\ncovering numbers of can be derived."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "3.5",
|
| 67 |
+
"parent_section_id": "3",
|
| 68 |
+
"section_name": "Expectation bounds for convex problems",
|
| 69 |
+
"text": "For convex problems, we demonstrate nonasymptotic expectation bounds\nfor the distance between SAA solutions and the true solution, the gap functional, and the optimality gap. These statements hold true without the compactness of the linear bounded operator . In the following, let be a solution to ( ###reference_###)\nand for each , let be a measurable solution to ( ###reference_###).\nThe space is\na Banach space with , is a\nseparable Hilbert space\nwith , and\nFor two constants \nand , it holds that\nThe mapping \nis integrable,\nand standard deviation-type constant\nis finite.\n4 (b) ###reference_i2###\nand its variants have been utilized, for example,\nin [13 ###reference_b13###] and, in particular,\nensures that\n is the unique solution to\n( ###reference_###).\nWe state the section\u2019s main result.\nIf 4 (a) ###reference_i1### holds true,\nlet be the embedding\nconstant of the embedding .\nIf in Assumption 4 ###reference_umption4###,\nthe following result ensures the\ntypical Monte Carlo convergence rate, ,\nfor the expected error .\nIf Assumptions 1 ###reference_umption1###, 2 ###reference_umption2### and 4 ###reference_umption4### hold,\nand is convex for all , then\nfor all ,\nThe proof is inspired by those of\nLemma 6 and Theorem 3 in [21 ###reference_b21###].\nUsing\n\nand 4 (b) ###reference_i2###, we obtain\nSince is continuously differentiable\non (see Proposition 3.1 ###reference_definition1###),\nand convex, we have\nAdding both estimates ensures\nHence\nTaking squares and\nusing the continuity of the embedding\n,\nwe obtain\nSince \n(see Proposition 3.1 ###reference_definition1###),\n, \nare independent and identically distributed,\nand is a\nseparable Hilbert space, we obtain\nThis implies the expectation bound.\n\u220e\nUsing this result, we further deduce superconvergence properties of the objective function values and the gap functional evaluated at SAA solutions.\nWe recall that denotes the objective\nfunction of ( ###reference_###).\nLet the hypotheses of Theorem 3.7 ###reference_definition7### hold with , and assume that there exists\n such that\nThen, for all , we have\nSince for all (see, for example, [13 ###reference_b13###, Thm. 2.4]), it suffices to show the claimed estimate for the gap functional. For this purpose, note that there exists such that\nwhere the first inequality follows from the optimality of . Using (3.7 ###reference_###), we have\nFollowing the proof of Lemma 4.8 in [13 ###reference_b13###], we arrive at\nwhere the final inequality is due to (3.7 ###reference_###).\nPutting together the pieces, we have\nTaking expectations, and using (3.6 ###reference_###) and Theorem 3.7 ###reference_definition7###,\nwe obtain the expectation bound.\n\u220e"
|
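The key probabilistic ingredient behind this Monte Carlo rate is the identity E||(1/N) sum Z_i||^2 = E||Z_1||^2 / N for i.i.d. mean-zero elements of a separable Hilbert space. A quick numerical sanity check on a finite-dimensional surrogate (synthetic data only, not the paper's experiments):

```python
import numpy as np

# For i.i.d. mean-zero Z_i, E||mean of N samples||^2 = E||Z||^2 / N, so the
# root-mean error decays like N^(-1/2).  Finite-dimensional surrogate.
rng = np.random.default_rng(4)
d = 50
for N in [10, 100, 1000, 10000]:
    errs = [np.linalg.norm(rng.normal(size=(N, d)).mean(axis=0))
            for _ in range(200)]
    print(N, np.mean(errs) * np.sqrt(N))   # roughly constant (about sqrt(d))
```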
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "Application to bang-bang optimal control under uncertainty",
|
| 75 |
+
"text": "In this section, we apply our results to risk-neutral, nonsmooth PDE-constrained minimization problems of the form\nwhere , , is a bounded, polyhedral domain, , is a smooth fidelity term defined on and denotes the compact embedding. Moreover,\nThe deterministic control\n\nis coupled with the random state variable via a parametrized elliptic PDE\nwhich the state satisfies for every .\nThroughout this section, we consider\nIn the following, we show that both linear, that is, , as well as bilinear, that is, , control problems fit into the theoretical framework ( ###reference_###) defining\nas well as introducing the integrand\ndenotes a suitable parametrized control-to-state operator defined on the image of a -neighborhood of . For both problem classes, the following assumptions are made.\nThe function \nis convex and continuously differentiable. Its gradient\n is Lipschitz continuous with Lipschitz\nconstant .\nMoreover\n\nfor all ,\nwhere\n is a polynomial and nondecreasing.\nThe map is measurable\nand there exists \nsuch that for all .\nThe mapping \nis measurable\nand \nfor all .\nMoreover, there exists such that \nfor all and .\nWe point out that 5 (a) ###reference_i1### implies\nMoreover, 1 (a) ###reference_i1###, 1 (b) ###reference_i2### and 1 (c) ###reference_i3### are clearly satisfied. For abbreviation, let be the Friedrichs constant of the domain , which equals the operator norm of ."
|
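The abstract proposes a conditional gradient method for these bang-bang-type problems. A minimal sketch of one possible generalized Frank-Wolfe loop, assuming the pointwise bang-bang-off linear minimization oracle induced by the L1 + box structure and the standard 2/(k+2) step size (the paper does not fix these implementation details here):

```python
import numpy as np

def conditional_gradient(grad_f, u0, a, b, beta, steps=200):
    """Sketch of a generalized conditional gradient loop for
    min f(u) + beta*||u||_1 + indicator(a <= u <= b).  grad_f is assumed
    to return the (sampled) gradient of the smooth part.  The oracle
    argmin_v <g, v> + beta*||v||_1 over [a, b] is pointwise bang-bang-off;
    the 2/(k+2) step rule is an assumption, not taken from the paper."""
    u = u0.copy()
    for k in range(steps):
        g = grad_f(u)
        v = np.where(g < -beta, b, np.where(g > beta, a, 0.0))
        u += 2.0 / (k + 2.0) * (v - u)    # convex combination stays feasible
    return u

# toy usage: f(u) = 0.5*||u - target||^2 on [-1, 1]^n with an L1 term
target = np.linspace(-2.0, 2.0, 50)
u_opt = conditional_gradient(lambda u: u - target, np.zeros(50),
                             a=-1.0, b=1.0, beta=0.3)
print(np.round(u_opt[:5], 3))
```

Note that the oracle atom coincides with the maximizer in the gap functional sketched earlier, so the duality gap is available as a by-product of every iteration.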
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.1",
|
| 79 |
+
"parent_section_id": "4",
|
| 80 |
+
"section_name": "Risk-neutral affine-linear PDE-constrained optimization",
|
| 81 |
+
"text": "We first consider (4.1 ###reference_###) governed by a class of affine-linear, parameterized PDEs, that is, . For simplicity, set . We consider the operator\nwhere\nand the associated parametrized equation\nThe following lemma is a direct consequence of the Lax\u2013Milgram lemma, the implicit function theorem as well as standard regularity results for elliptic PDEs. Its proof is omitted for the sake of brevity.\nLet Assumption 5 ###reference_umption5###\nhold with .\nFor every , there exists a unique satisfying (4.5 ###reference_###). The mapping \nis infinitely many times continuously differentiable, and we have\nMoreover, if ,\nthen , and there exists a constant such that\nFinally, the mapping\nis continuous.\nIn the following, let be an arbitrary but fixed convex, bounded neighborhood of , and let be such that for all .\nDefining the parametrized control-to-state operator\nwe can verify the remaining assumptions in the linear case.\nIf Assumption 5 ###reference_umption5###\nhold with , then\n in (4.3 ###reference_###) is Carath\u00e9odory, and\nThe Carath\u00e9odory property of follows from the continuity of and that of the mapping from Lemma 4.1 ###reference_definition1###. Moreover, leveraging 5 (a) ###reference_i1### and (4.6 ###reference_###) yields\n\u220e\nNext, we verify Assumptions 2 ###reference_umption2### and 5 (a) ###reference_i1###.\nLet Assumption 5 ###reference_umption5###\nhold with .\nWe define\nThe mapping is Carath\u00e9doroy and, for every , the function\nis continuously differentiable on with . Moreover, we have the estimates\nFinally, we have\nThe Carath\u00e9odory property of follows analogously to Lemma 4.2 ###reference_definition2###.\nMoreover, the differentiability of and the representation of its gradient are direct consequences of the chain rule as well as standard adjoint calculus, respectively. Using this representation and (4.6 ###reference_###), we obtain\nFurthermore, by applying (4.4 ###reference_###), we have\nwhere the last inequality follows again by using (4.6 ###reference_###). Combining both inequalities yields the desired estimate. The Lipschitz estimate for follows analogously noting that\nand thus\nfor all and .\n\u220e\nNext, we discuss sufficient conditions for Assumption 4 ###reference_umption4### with the particular choice of and . In this case, 4 (a) ###reference_i1### is satisfied. In the following, we denote by a solution to (4.1 ###reference_###). We recall the identity .\nLet Assumption 5 ###reference_umption5###\nhold with .\nThen, the mapping is measurable, and\nIf, moreover, there exist constants and such that\nthen there exists such that\nLemma 4.1 ###reference_definition1### ensures as well as the claimed measurability.\nUsing (4.7 ###reference_###) and (4.8 ###reference_###), we arrive at\nTaking expectations yields the expectation bound.\nFinally, the growth condition follows analogously to Proposition 5.5 in [13 ###reference_b13###].\n\u220e\nFinally, we comment on the uniform Lipschitz continuity assumed in Corollary 3.8 ###reference_definition8###.\nLet Assumption 5 ###reference_umption5###\nhold with .\nAssume that the mapping from 5 (c) ###reference_i3### satisfies all \nand some . Then, there holds\nwhere is the constant from Lemma 4.1 ###reference_definition1###, and denotes the embedding constant of .\nSimilarly to the proof of Lemma 4.3 ###reference_definition3###, we obtain\nusing (4.7 ###reference_###) as well as the assumed boundedness of . 
Furthermore, by standard adjoint arguments and by again exploiting (4.7 ###reference_###), we arrive at\nCombining both observations yields the desired estimate.\n\u220e"
|
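For intuition about the parametrized control-to-state map in this affine-linear case, a toy 1D finite-difference stand-in for -(kappa(x, xi) y')' = u with homogeneous Dirichlet data can be useful. A minimal sketch under these assumptions (1D domain, interface-sampled coefficient; the paper's equation and function spaces are more general):

```python
import numpy as np

def solve_state(u, kappa):
    """1D finite-difference stand-in for an affine-linear state equation
    -(kappa(x, xi) y')' = u on (0, 1), y(0) = y(1) = 0.  kappa holds the
    n+1 interface values kappa_{i+1/2}; u holds the n interior nodal
    values.  A toy control-to-state map S(u, xi), not the paper's PDE."""
    n = u.size
    h = 1.0 / (n + 1)
    main = (kappa[:-1] + kappa[1:]) / h**2      # diagonal of the stiffness matrix
    off = -kappa[1:-1] / h**2                   # sub-/super-diagonal entries
    A = np.diag(main) + np.diag(off, 1) + np.diag(off, -1)
    return np.linalg.solve(A, u)

rng = np.random.default_rng(2)
n = 99
u = np.ones(n)
kappa = 1.0 + 0.5 * rng.uniform(size=n + 1)     # one random coefficient sample
y = solve_state(u, kappa)
print(y.max())
```

Averaging such solves over many samples of kappa is exactly the SAA surrogate of the risk-neutral tracking objective in this toy setting.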
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "4.2",
|
| 85 |
+
"parent_section_id": "4",
|
| 86 |
+
"section_name": "Risk-neutral bilinear PDE-constrained optimization",
|
| 87 |
+
"text": "Next, we show that bilinear control problems, that is, , also fit our abstract setting. For this purpose, we assume \nfor a.e. .\nSimilar to the previous example, consider the operator\nwhere\nWe readily verify that is infinitely many times continuously differentiable.\nNow, define the open set\nwhere\nMoreover, \nis the open -unit ball,\nand is the embedding constant of .\nWe consider the equation\nThe following result is a consequence of the implicit function theorem.\nThe constant in the following lemma is equal to that in Lemma 4.1 ###reference_definition1###.\nLet Assumption 5 ###reference_umption5### hold with \nfor a.e. .\nFor every , there exists a unique satisfying (4.9 ###reference_###). The mapping \nis infinitely many times continuously differentiable, and we have\nMoreover, if , then , we have\nand the mapping\nis continuous.\nGiven , the existence of a unique solution to (4.9 ###reference_###) and the a priori estimate in (4.10 ###reference_###) follow from the Lax\u2013Milgram lemma and the definitions of and , see also [24 ###reference_b24###, sect. 7.1]. Similarly, the higher regularity and the estimate in (4.11 ###reference_###) follow by a standard bootstrapping argument. It remains to discuss the regularity of the solution mapping. Starting with the -result, note that there holds\nTogether with (4.10 ###reference_###), this implies that is a Banach space isomorphism. Hence, the smoothness of with respect to its inputs follows from the implicit function theorem. In order to verify the -continuity, let\ndenote two admissible triples and let be the associate solutions of (4.9 ###reference_###). The difference is the unique solution of\nwhere we identify\nInvoking (4.11 ###reference_###), we thus get\nand together with\nfor some , the claimed continuity result follows.\n\u220e\nFrom this point on, we would like to\nargue as in the previous section by introducing an appropriate control-to-state operator in order to define the integrand . In the bilinear setting, however, this requires additional care, as the equation in (4.9 ###reference_###) might not be well-posed for controls in a -neighborhood of .\nTaking this into account, we set and consider the mapping\nHere, is the restriction of the compact operator , and denotes is noncontinuous inverse. Using the following lemma, we deduce the Lipschitz continuity of with respect to the control.\nIf is a bounded domain, then there exists a constant\n such that for all\n and , we have\n\nand\nThis is a consequence of\nTheorem 7.4 in [5 ###reference_b5###], the -trace operator\u2019s continuity, and Friedrichs\u2019 inequality.\n\u220e\nLet Assumption 5 ###reference_umption5### hold with \nfor a.e. .\nIf and , then\nLet be such that , . Since , we have , see Lemma 4.6 ###reference_definition6###.\nConsequently, see Lemma 4.7 ###reference_definition7###, induces a linear continuous functional on with\nfor all . The claimed result then follows from (4.10 ###reference_###) noting that\n\u220e\nThe following lemma verifies Assumption 1 ###reference_umption1### for the integrand\n in (4.3 ###reference_###).\nLet Assumption 5 ###reference_umption5### hold with \nfor a.e. .\nThen\nMoreover, is continuous\nin its first argument\non \nfor each \nand measurable in its second\nargument on for each .\nThe measurability and continuity statements of follow from the continuity of \nand that of (see Lemma 4.6 ###reference_definition6###), 5 (a) ###reference_i1###, and the Lipschitz continuity of for fixed . 
The stability estimate follows similarly to Lemma 4.2 ###reference_definition2### by using (4.10 ###reference_###).\n\u220e\nNext, we verify Assumptions 2 ###reference_umption2### and 3 (a) ###reference_i1###. We define\nLet Assumption 5 ###reference_umption5### hold with \nfor a.e. .\nFor each , consider the mapping\nThe function is continuously differentiable on , and there holds\nMoreover, for every , we have , the mapping\nis Carath\u00e9odory, and we have the a priori estimates\nas well as for all ,\nMoreover, there exists an integrable random variable such that\nThe statement on the differentiability of is a direct consequence of the chain rule and adjoint calculus. If , then Lemma 4.6 ###reference_definition6### implies and thus, see Lemma 4.7 ###reference_definition7###, . The Carath\u00e9odory property of follows again from the continuity of and .\nInvoking the stability estimates of Lemma 4.6 ###reference_definition6###, we further obtain\nfor all , and\nAs a consequence, we obtain\nfor all . Moreover, if , then\nIn particular, owing to Assumption 5 ###reference_umption5###, (4.11 ###reference_###) and (4.13 ###reference_###), there exists a square integrable random variable such that\nas well as\nIn order to establish the claimed Lipschitz continuity, fix and , and split\nFor estimating the norm of the first difference, we note that\nusing Lemma 4.8 ###reference_definition8###. Similarly, arguing along the lines of the proof of Lemma 4.8 ###reference_definition8###, we get\nAs a consequence, we have\nFinally, we decompose and estimate\nfor some . By Friedrichs\u2019 inequality, the claimed Lipschitz statement follows.\n\u220e\nLemma 4.10 ###reference_definition10###\nverifies Assumptions 2 ###reference_umption2### and 3 (a) ###reference_i1###.\nMoreover, as in the linear setting, 3 (b) ###reference_i2### is satisfied since is uniformly bounded on ."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "5",
|
| 91 |
+
"parent_section_id": null,
|
| 92 |
+
"section_name": "Numerical illustrations",
|
| 93 |
+
"text": "We present numerical illustrations for two instances of the risk-neutral problems analyzed\nin section 4 ###reference_###. The section\u2019s main objective is to illustrate\nthe theoretical results established in sections 3.4 ###reference_### and 3.5 ###reference_###.\nBefore presenting numerical results, we discuss problem data,\ndiscretization aspects, implementation details, and the computation of reference solutions\nand gap functions."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "5.1",
|
| 97 |
+
"parent_section_id": "5",
|
| 98 |
+
"section_name": "Affine-linear problem",
|
| 99 |
+
"text": "We chose , , and\n.\nFigure 1 ###reference_### depicts\nnominal solution and a reference SAA solution.\nFigure 2 ###reference_### depicts convergence\nrates for empirical estimates of the optimality gap and\n over the sample size .\nWe used realizations to estimate these means, and computed\nconvergence rates using least squares.\nFigure 3 ###reference_### depicts convergence\nrates of the SAA gap function\u2019s empirical means.\nThe empirical convergence rates closely match the theoretical ones.\n###figure_1### ###figure_2### ###figure_3### ###figure_4### ###figure_5### ###figure_6###"
|
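The convergence rates reported in Figures 2 and 3 come from least-squares fits on log-log data. A minimal sketch of that fit; the data below are synthetic, generated with the Monte Carlo N^{-1/2} decay, not the paper's results:

```python
import numpy as np

def fit_rate(N, err):
    """Least-squares slope of log(err) vs. log(N), i.e. err ~ C * N^rate.
    Mirrors the least-squares rate computation mentioned in the text."""
    slope, _ = np.polyfit(np.log(N), np.log(err), 1)
    return slope

N = np.array([2**k for k in range(4, 12)])
rng = np.random.default_rng(3)
err = 3.0 * N**-0.5 * np.exp(0.05 * rng.normal(size=N.size))  # noisy MC decay
print(f"fitted rate: {fit_rate(N, err):.3f}")   # close to the -1/2 MC rate
```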
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "5.2",
|
| 103 |
+
"parent_section_id": "5",
|
| 104 |
+
"section_name": "Bilinear problem",
|
| 105 |
+
"text": "We chose the constant control bounds and , and .\nFigure 4 ###reference_### depicts\nnominal critical points and a reference SAA critical point.\nFigure 3 ###reference_### depicts convergence\nrates of the SAA gap function\u2019s empirical means. These rates are faster than predicted by the theory, see Corollary 3.6 ###reference_definition6###. We think that this can be attributed to the fact that the covering number ansatz does not exploit higher-order regularity of the integrand and, in particular, potential local curvature around isolated minimizers. While a closer inspection of this improved convergence behavior is certainly of interest, it goes beyond the scope of the current work and is left for future research.\n###figure_7### ###figure_8###"
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "6",
|
| 109 |
+
"parent_section_id": null,
|
| 110 |
+
"section_name": "Discussion",
|
| 111 |
+
"text": "In this paper, we have analyzed the SAA approach for risk-neutral optimization problems that incorporate a nonsmooth but convex regularization term. Our main results address the asymptotic consistency of this scheme, and the derivation of nonasymptotic sample size estimates for various optimality measures. The latter, as well as the employed techniques, come in two different flavours: for the general nonconvex case, we provide estimates on the expected value of the gap functional by applying the covering number approach. For convex objectives, and by relying on common growth conditions, we prove stronger results including convergence rates for the expected distance between minimizers, improved estimates for the gap functional, and the suboptimality in the objective function value. The presented abstract framework is applied to both linear and bilinear PDE-constrained problems under uncertainty. We also use these applications to empirically verify the sharpness of our convergence guarantees.\nOur investigation also raises new questions about the SAA approach for nonsmooth minimization problems. These include, for example, the extension of the results in section 3.5 ###reference_### to nonconvex problems assuming suitable second-order optimality conditions and taking into account the potential existence of multiple global and/or local minimizers. Moreover, an extension of the current results to risk-averse stochastic optimization and variational inequalities may be of interest."
|
| 112 |
+
}
|
| 113 |
+
],
|
| 114 |
+
"appendix": [
|
| 115 |
+
{
|
| 116 |
+
"section_id": "Appendix 1",
|
| 117 |
+
"parent_section_id": null,
|
| 118 |
+
"section_name": "Appendix A Uniform expectation bounds for expectation mappings",
|
| 119 |
+
"text": "We establish essentially known uniform expectation bounds for integrands defined on potentially infinite-dimensional\nspaces.\nWhile the techniques used in this section are\nstandard in the literature on stochastic programming, the bounds are instrumental for\nestablishing one of our main result in section 3.4 ###reference_###.\nLet be a complete probability space,\nand let be a random element with image space .\nMoreover, let be sequence of independent -valued random elements\ndefined on a complete probability space, each having the same distribution as .\nThe set is a nonempty, closed,\nbounded, convex\nsubset of a reflexive Banach space\n, is a Banach space, and is linear and compact.\nThe space is a separable Hilbert space,\nand \nis a Carath\u00e9odory function.\nFor an integrable random variable ,\n is Lipschitz continuous\nwith Lipschitz constant \nfor each .\nThere exists such that for all , we have\n.\nWe define ,\n, and .\nIf Assumption 6 ###reference_umption6### holds, then\nfor each and ,\nThe proof is inspired by those\nof Theorems 9.84 and 9.86 in [34 ###reference_b34###].\nWe have\n\nand\n\nfor all ,\nwhere .\nWe define .\nSince is compact,\n is finite and there exist \nsuch that for each ,\nwe have ,\nwhere .\nFor all ,\nHence\nUsing Theorem 3 in [30 ###reference_b30###]\nand Lemma 1 in [21 ###reference_b21###], we have\nNow, Lemma B.5 in [26 ###reference_b26###] ensures\n.\nCombined with , we obtain the expectation bound.\n\u220e"
|
| 120 |
+
}
|
| 121 |
+
],
|
| 122 |
+
"tables": {},
|
| 123 |
+
"image_paths": {
|
| 124 |
+
"1(a)": {
|
| 125 |
+
"figure_path": "2408.10384v1_figure_1(a).png",
|
| 126 |
+
"caption": "(a)\nFigure 1: \nFor the affine-linear control\nproblem,\nnominal solution (left),\nand reference SAA solution u\u2217superscript\ud835\udc62u^{*}italic_u start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT\nwith N=Nref\ud835\udc41subscript\ud835\udc41refN=N_{\\text{ref}}italic_N = italic_N start_POSTSUBSCRIPT ref end_POSTSUBSCRIPT (right).",
|
| 127 |
+
"url": "http://arxiv.org/html/2408.10384v1/x1.png"
|
| 128 |
+
},
|
| 129 |
+
"1(b)": {
|
| 130 |
+
"figure_path": "2408.10384v1_figure_1(b).png",
|
| 131 |
+
"caption": "(b)\nFigure 1: \nFor the affine-linear control\nproblem,\nnominal solution (left),\nand reference SAA solution u\u2217superscript\ud835\udc62u^{*}italic_u start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT\nwith N=Nref\ud835\udc41subscript\ud835\udc41refN=N_{\\text{ref}}italic_N = italic_N start_POSTSUBSCRIPT ref end_POSTSUBSCRIPT (right).",
|
| 132 |
+
"url": "http://arxiv.org/html/2408.10384v1/x2.png"
|
| 133 |
+
},
|
| 134 |
+
"2(a)": {
|
| 135 |
+
"figure_path": "2408.10384v1_figure_2(a).png",
|
| 136 |
+
"caption": "(a)\nFigure 2: For the affine-linear control\nproblem,\nempirical estimate of\n\ud835\udd3c\u2062[Gref\u2062(uN\u2217)\u2212Gref\u2062(u\u2217)]\ud835\udd3cdelimited-[]subscript\ud835\udc3arefsuperscriptsubscript\ud835\udc62\ud835\udc41subscript\ud835\udc3arefsuperscript\ud835\udc62\\mathbb{E}[G_{\\text{ref}}(u_{N}^{*})-G_{\\text{ref}}(u^{*})]blackboard_E [ italic_G start_POSTSUBSCRIPT ref end_POSTSUBSCRIPT ( italic_u start_POSTSUBSCRIPT italic_N end_POSTSUBSCRIPT start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT ) - italic_G start_POSTSUBSCRIPT ref end_POSTSUBSCRIPT ( italic_u start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT ) ]\nas a function of the sample size N\ud835\udc41Nitalic_N\n(left) and empirical estimate of\n\ud835\udd3c\u2062[\u2016uN\u2217\u2212u\u2217\u2016L1\u2062(D)]\ud835\udd3cdelimited-[]subscriptnormsuperscriptsubscript\ud835\udc62\ud835\udc41superscript\ud835\udc62superscript\ud835\udc3f1\ud835\udc37\\mathbb{E}[\\|u_{N}^{*}-u^{*}\\|_{L^{1}(D)}]blackboard_E [ \u2225 italic_u start_POSTSUBSCRIPT italic_N end_POSTSUBSCRIPT start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT - italic_u start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT \u2225 start_POSTSUBSCRIPT italic_L start_POSTSUPERSCRIPT 1 end_POSTSUPERSCRIPT ( italic_D ) end_POSTSUBSCRIPT ]\nas a function of the sample size N\ud835\udc41Nitalic_N\n(right).",
|
| 137 |
+
"url": "http://arxiv.org/html/2408.10384v1/x3.png"
|
| 138 |
+
},
|
| 139 |
+
"2(b)": {
|
| 140 |
+
"figure_path": "2408.10384v1_figure_2(b).png",
|
| 141 |
+
"caption": "(b)\nFigure 2: For the affine-linear control\nproblem,\nempirical estimate of\n\ud835\udd3c\u2062[Gref\u2062(uN\u2217)\u2212Gref\u2062(u\u2217)]\ud835\udd3cdelimited-[]subscript\ud835\udc3arefsuperscriptsubscript\ud835\udc62\ud835\udc41subscript\ud835\udc3arefsuperscript\ud835\udc62\\mathbb{E}[G_{\\text{ref}}(u_{N}^{*})-G_{\\text{ref}}(u^{*})]blackboard_E [ italic_G start_POSTSUBSCRIPT ref end_POSTSUBSCRIPT ( italic_u start_POSTSUBSCRIPT italic_N end_POSTSUBSCRIPT start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT ) - italic_G start_POSTSUBSCRIPT ref end_POSTSUBSCRIPT ( italic_u start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT ) ]\nas a function of the sample size N\ud835\udc41Nitalic_N\n(left) and empirical estimate of\n\ud835\udd3c\u2062[\u2016uN\u2217\u2212u\u2217\u2016L1\u2062(D)]\ud835\udd3cdelimited-[]subscriptnormsuperscriptsubscript\ud835\udc62\ud835\udc41superscript\ud835\udc62superscript\ud835\udc3f1\ud835\udc37\\mathbb{E}[\\|u_{N}^{*}-u^{*}\\|_{L^{1}(D)}]blackboard_E [ \u2225 italic_u start_POSTSUBSCRIPT italic_N end_POSTSUBSCRIPT start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT - italic_u start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT \u2225 start_POSTSUBSCRIPT italic_L start_POSTSUPERSCRIPT 1 end_POSTSUPERSCRIPT ( italic_D ) end_POSTSUBSCRIPT ]\nas a function of the sample size N\ud835\udc41Nitalic_N\n(right).",
|
| 142 |
+
"url": "http://arxiv.org/html/2408.10384v1/x4.png"
|
| 143 |
+
},
|
| 144 |
+
"3(a)": {
|
| 145 |
+
"figure_path": "2408.10384v1_figure_3(a).png",
|
| 146 |
+
"caption": "(a)\nFigure 3: Empirical estimate of\n\ud835\udd3c\u2062[\u03a8ref\u2062(uN\u2217)]\ud835\udd3cdelimited-[]subscript\u03a8refsuperscriptsubscript\ud835\udc62\ud835\udc41\\mathbb{E}[\\Psi_{\\text{ref}}(u_{N}^{*})]blackboard_E [ roman_\u03a8 start_POSTSUBSCRIPT ref end_POSTSUBSCRIPT ( italic_u start_POSTSUBSCRIPT italic_N end_POSTSUBSCRIPT start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT ) ]\nas a function of the sample size N\ud835\udc41Nitalic_N\nfor the affine-linear control problem (left)\nand bilinear control problem (right).",
|
| 147 |
+
"url": "http://arxiv.org/html/2408.10384v1/x5.png"
|
| 148 |
+
},
|
| 149 |
+
"3(b)": {
|
| 150 |
+
"figure_path": "2408.10384v1_figure_3(b).png",
|
| 151 |
+
"caption": "(b)\nFigure 3: Empirical estimate of\n\ud835\udd3c\u2062[\u03a8ref\u2062(uN\u2217)]\ud835\udd3cdelimited-[]subscript\u03a8refsuperscriptsubscript\ud835\udc62\ud835\udc41\\mathbb{E}[\\Psi_{\\text{ref}}(u_{N}^{*})]blackboard_E [ roman_\u03a8 start_POSTSUBSCRIPT ref end_POSTSUBSCRIPT ( italic_u start_POSTSUBSCRIPT italic_N end_POSTSUBSCRIPT start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT ) ]\nas a function of the sample size N\ud835\udc41Nitalic_N\nfor the affine-linear control problem (left)\nand bilinear control problem (right).",
|
| 152 |
+
"url": "http://arxiv.org/html/2408.10384v1/x6.png"
|
| 153 |
+
},
|
| 154 |
+
"4(a)": {
|
| 155 |
+
"figure_path": "2408.10384v1_figure_4(a).png",
|
| 156 |
+
"caption": "(a)\nFigure 4: For the bilinear control\nproblem,\nnominal critical point (left)\nand reference SAA critical point u\u2217superscript\ud835\udc62u^{*}italic_u start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT\nwith N=Nref\ud835\udc41subscript\ud835\udc41refN=N_{\\text{ref}}italic_N = italic_N start_POSTSUBSCRIPT ref end_POSTSUBSCRIPT (right).",
|
| 157 |
+
"url": "http://arxiv.org/html/2408.10384v1/x7.png"
|
| 158 |
+
},
|
| 159 |
+
"4(b)": {
|
| 160 |
+
"figure_path": "2408.10384v1_figure_4(b).png",
|
| 161 |
+
"caption": "(b)\nFigure 4: For the bilinear control\nproblem,\nnominal critical point (left)\nand reference SAA critical point u\u2217superscript\ud835\udc62u^{*}italic_u start_POSTSUPERSCRIPT \u2217 end_POSTSUPERSCRIPT\nwith N=Nref\ud835\udc41subscript\ud835\udc41refN=N_{\\text{ref}}italic_N = italic_N start_POSTSUBSCRIPT ref end_POSTSUBSCRIPT (right).",
|
| 162 |
+
"url": "http://arxiv.org/html/2408.10384v1/x8.png"
|
| 163 |
+
}
|
| 164 |
+
},
|
| 165 |
+
"validation": true,
|
| 166 |
+
"references": [
|
| 167 |
+
{
|
| 168 |
+
"1": {
|
| 169 |
+
"title": "The FEniCS Project Version 1.5.",
|
| 170 |
+
"author": "M. S. Aln\u00e6s, J. Blechta, J. Hake, A. Johansson, B. Kehlet, A. Logg, C.\nRichardson, J. Ring, M. E. Rognes, and G. N. Wells.",
|
| 171 |
+
"venue": "Arch. Numer. Software, 3(100):9\u201323, 2015.",
|
| 172 |
+
"url": null
|
| 173 |
+
}
|
| 174 |
+
},
|
| 175 |
+
{
|
| 176 |
+
"2": {
|
| 177 |
+
"title": "Optimal sensor placement considering both sensor faults under\nuncertainty and sensor clustering for vibration-based damage detection.",
|
| 178 |
+
"author": "H. An, B. D. Youn, and H. S. Kim.",
|
| 179 |
+
"venue": "Struct. Multidiscip. Optim., 65(3):Paper No. 102, 32, 2022.",
|
| 180 |
+
"url": null
|
| 181 |
+
}
|
| 182 |
+
},
|
| 183 |
+
{
|
| 184 |
+
"3": {
|
| 185 |
+
"title": "Duality of metric entropy.",
|
| 186 |
+
"author": "S. Artstein, V. Milman, and S. J. Szarek.",
|
| 187 |
+
"venue": "Ann. of Math. (2), 159(3):1313\u20131328, 2004.",
|
| 188 |
+
"url": null
|
| 189 |
+
}
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"4": {
|
| 193 |
+
"title": "Set-Valued Analysis.",
|
| 194 |
+
"author": "J.-P. Aubin and H. Frankowska.",
|
| 195 |
+
"venue": "Springer, Boston, 2009.",
|
| 196 |
+
"url": null
|
| 197 |
+
}
|
| 198 |
+
},
|
| 199 |
+
{
|
| 200 |
+
"5": {
|
| 201 |
+
"title": "Multiplication in Sobolev spaces, revisited.",
|
| 202 |
+
"author": "A. Behzadan and M. Holst.",
|
| 203 |
+
"venue": "Ark. Mat., 59(2):275\u2013306, 2021.",
|
| 204 |
+
"url": null
|
| 205 |
+
}
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"6": {
|
| 209 |
+
"title": "Quantitative Analysis in Sobolev Imbedding Theorems and\nApplications to Spectral Theory.",
|
| 210 |
+
"author": "M. \u0160. Birman and M. Z. Solomjak.",
|
| 211 |
+
"venue": "AMS, Providence, RI, 1980.",
|
| 212 |
+
"url": null
|
| 213 |
+
}
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"7": {
|
| 217 |
+
"title": "A framework for automated PDE-constrained optimisation.",
|
| 218 |
+
"author": "S. W. Funke and P. E. Farrell.",
|
| 219 |
+
"venue": "preprint, https://arxiv.org/abs/1302.3894, 2013.",
|
| 220 |
+
"url": null
|
| 221 |
+
}
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"8": {
|
| 225 |
+
"title": "Design optimisation and resource assessment for tidal-stream\nrenewable energy farms using a new continuous turbine approach.",
|
| 226 |
+
"author": "S. W. Funke, S. C. Kramer, and M. D. Piggott.",
|
| 227 |
+
"venue": "Renew. Energ., 99:1046\u20131061, 2016.",
|
| 228 |
+
"url": null
|
| 229 |
+
}
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"9": {
|
| 233 |
+
"title": "Optimal remediation with well locations and pumping rates selected as\ncontinuous decision variables.",
|
| 234 |
+
"author": "J. Guan and M. Aral.",
|
| 235 |
+
"venue": "Journal of Hydrology, 221(1):20\u201342, 1999.",
|
| 236 |
+
"url": null
|
| 237 |
+
}
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"10": {
|
| 241 |
+
"title": "Weak lower semi-continuity of the optimal value function and\napplications to worst-case robust optimal control problems.",
|
| 242 |
+
"author": "R. Herzog and F. Schmidt.",
|
| 243 |
+
"venue": "Optimization, 61(6):685\u2013697, 2012.",
|
| 244 |
+
"url": null
|
| 245 |
+
}
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"11": {
|
| 249 |
+
"title": "On quantitative stability in infinite-dimensional optimization under\nuncertainty.",
|
| 250 |
+
"author": "M. Hoffhues, W. R\u00f6misch, and T. M. Surowiec.",
|
| 251 |
+
"venue": "Optim. Lett., 15(8):2733\u20132756, 2021.",
|
| 252 |
+
"url": null
|
| 253 |
+
}
|
| 254 |
+
},
|
| 255 |
+
{
|
| 256 |
+
"12": {
|
| 257 |
+
"title": "The sample average approximation method for stochastic discrete\noptimization.",
|
| 258 |
+
"author": "A. J. Kleywegt, A. Shapiro, and T. Homem-de Mello.",
|
| 259 |
+
"venue": "SIAM J. Optim., 12(2):479\u2013502, 2002.",
|
| 260 |
+
"url": null
|
| 261 |
+
}
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"13": {
|
| 265 |
+
"title": "On fast convergence rates for generalized conditional gradient\nmethods with backtracking stepsize.",
|
| 266 |
+
"author": "K. Kunisch and D. Walter.",
|
| 267 |
+
"venue": "Numer. Algebra Control Optim., 14(1):108\u2013136, 2024.",
|
| 268 |
+
"url": null
|
| 269 |
+
}
|
| 270 |
+
},
|
| 271 |
+
{
|
| 272 |
+
"14": {
|
| 273 |
+
"title": "On some asymptotic properties of maximum likelihood estimates and\nrelated Bayes\u2019 estimates.",
|
| 274 |
+
"author": "L. M. Le Cam.",
|
| 275 |
+
"venue": "Univ. California Publ. Stat. 1, pages 277\u2013329, 1953.",
|
| 276 |
+
"url": null
|
| 277 |
+
}
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"15": {
|
| 281 |
+
"title": "New sample complexity bounds for sample average approximation in\nheavy-tailed stochastic programming.",
|
| 282 |
+
"author": "H. Liu and J. Tong.",
|
| 283 |
+
"venue": "In Forty-first International Conference on Machine Learning,\n2024.",
|
| 284 |
+
"url": null
|
| 285 |
+
}
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"16": {
|
| 289 |
+
"title": "Automated Solution of Differential Equations by the\nFinite Element Method: The FEniCS Book.",
|
| 290 |
+
"author": "A. Logg, K.-A. Mardal, and G. N. Wells, editors.",
|
| 291 |
+
"venue": "Springer, Berlin, 2012.",
|
| 292 |
+
"url": null
|
| 293 |
+
}
|
| 294 |
+
},
|
| 295 |
+
{
|
| 296 |
+
"17": {
|
| 297 |
+
"title": "An Introduction to Computational Stochastic PDEs.",
|
| 298 |
+
"author": "G. J. Lord, C. E. Powell, and T. Shardlow.",
|
| 299 |
+
"venue": "Cambridge University Press, Cambridge, 2014.",
|
| 300 |
+
"url": null
|
| 301 |
+
}
|
| 302 |
+
},
|
| 303 |
+
{
|
| 304 |
+
"18": {
|
| 305 |
+
"title": "Complexity analysis of stochastic gradient methods for\nPDE-constrained optimal control problems with uncertain parameters.",
|
| 306 |
+
"author": "M. Martin, S. Krumscheid, and F. Nobile.",
|
| 307 |
+
"venue": "ESAIM Math. Model. Numer. Anal., 55(4):1599\u20131633, 2021.",
|
| 308 |
+
"url": null
|
| 309 |
+
}
|
| 310 |
+
},
|
| 311 |
+
{
|
| 312 |
+
"19": {
|
| 313 |
+
"title": "Consistency of Monte Carlo estimators for risk-neutral\nPDE-constrained optimization.",
|
| 314 |
+
"author": "J. Milz.",
|
| 315 |
+
"venue": "Appl. Math. Optim., 87(57), 2023.",
|
| 316 |
+
"url": null
|
| 317 |
+
}
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"20": {
|
| 321 |
+
"title": "Reliable Error Estimates for Optimal Control of Linear\nElliptic PDEs with Random Inputs.",
|
| 322 |
+
"author": "J. Milz.",
|
| 323 |
+
"venue": "SIAM/ASA J. Uncertain. Quantif., 11(4):1139\u20131163, 2023.",
|
| 324 |
+
"url": null
|
| 325 |
+
}
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"21": {
|
| 329 |
+
"title": "Sample average approximations of strongly convex stochastic programs\nin Hilbert spaces.",
|
| 330 |
+
"author": "J. Milz.",
|
| 331 |
+
"venue": "Optim. Lett., 17:471\u2013492, 2023.",
|
| 332 |
+
"url": null
|
| 333 |
+
}
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"22": {
|
| 337 |
+
"title": "milzj/fw4pde: v1.0.2, February 2024.",
|
| 338 |
+
"author": "J. Milz.",
|
| 339 |
+
"venue": "doi:10.5281/zenodo.10644778.",
|
| 340 |
+
"url": null
|
| 341 |
+
}
|
| 342 |
+
},
|
| 343 |
+
{
|
| 344 |
+
"23": {
|
| 345 |
+
"title": "Supplementary code for the manuscript: Empirical risk minimization\nfor risk-neutral composite optimal control with applications to bang-bang\ncontrol, August 2024.",
|
| 346 |
+
"author": "J. Milz.",
|
| 347 |
+
"venue": "doi:10.5281/zenodo.13336970.",
|
| 348 |
+
"url": null
|
| 349 |
+
}
|
| 350 |
+
},
|
| 351 |
+
{
|
| 352 |
+
"24": {
|
| 353 |
+
"title": "Consistency of sample-based stationary points for\ninfinite-dimensional stochastic optimization.",
|
| 354 |
+
"author": "J. Milz.",
|
| 355 |
+
"venue": "preprint, https://arxiv.org/abs/2306.17032, June 2023.",
|
| 356 |
+
"url": null
|
| 357 |
+
}
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"25": {
|
| 361 |
+
"title": "Asymptotic consistency for nonconvex risk-averse stochastic\noptimization with infinite dimensional decision spaces.",
|
| 362 |
+
"author": "J. Milz and T. M. Surowiec.",
|
| 363 |
+
"venue": "Math. Oper. Res., 2023.",
|
| 364 |
+
"url": null
|
| 365 |
+
}
|
| 366 |
+
},
|
| 367 |
+
{
|
| 368 |
+
"26": {
|
| 369 |
+
"title": "Sample size estimates for risk-neutral semilinear PDE-constrained\noptimization.",
|
| 370 |
+
"author": "J. Milz and M. Ulbrich.",
|
| 371 |
+
"venue": "SIAM J. Optim., 34(1):844\u2013869, 2024.",
|
| 372 |
+
"url": null
|
| 373 |
+
}
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"27": {
|
| 377 |
+
"title": "dolfin-adjoint 2018.1: automated adjoints for FEniCS\nand Firedrake.",
|
| 378 |
+
"author": "S. K. Mitusch, S. W. Funke, and J. S. Dokken.",
|
| 379 |
+
"venue": "J. Open Source Softw., 4(38):1292, 2019.",
|
| 380 |
+
"url": null
|
| 381 |
+
}
|
| 382 |
+
},
|
| 383 |
+
{
|
| 384 |
+
"28": {
|
| 385 |
+
"title": "The Moola optimisation package.",
|
| 386 |
+
"author": "M. Nordaas and S. W. Funke.",
|
| 387 |
+
"venue": "https://github.com/funsim/moola, 2016.",
|
| 388 |
+
"url": null
|
| 389 |
+
}
|
| 390 |
+
},
|
| 391 |
+
{
|
| 392 |
+
"29": {
|
| 393 |
+
"title": "Partnership for an Advanced Computing Environment\n(PACE), 2017.",
|
| 394 |
+
"author": "PACE.",
|
| 395 |
+
"venue": "URL: http://www.pace.gatech.edu.",
|
| 396 |
+
"url": null
|
| 397 |
+
}
|
| 398 |
+
},
|
| 399 |
+
{
|
| 400 |
+
"30": {
|
| 401 |
+
"title": "Remarks on inequalities for large deviation probabilities.",
|
| 402 |
+
"author": "I. F. Pinelis and A. I. Sakhanenko.",
|
| 403 |
+
"venue": "Theory Probab. Appl., 30(1):143\u2013148, 1986.",
|
| 404 |
+
"url": null
|
| 405 |
+
}
|
| 406 |
+
},
|
| 407 |
+
{
|
| 408 |
+
"31": {
|
| 409 |
+
"title": "Asymptotic properties of Monte Carlo methods in elliptic\nPDE-constrained optimization under uncertainty.",
|
| 410 |
+
"author": "W. R\u00f6misch and T. M. Surowiec.",
|
| 411 |
+
"venue": "preprint, https://arxiv.org/abs/2106.06347, 2021.",
|
| 412 |
+
"url": null
|
| 413 |
+
}
|
| 414 |
+
},
|
| 415 |
+
{
|
| 416 |
+
"32": {
|
| 417 |
+
"title": "Quasi-Monte Carlo methods in Python.",
|
| 418 |
+
"author": "P. T. Roy, A. B. Owen, M. Balandat, and M. Haberland.",
|
| 419 |
+
"venue": "Journal of Open Source Software, 8(84):5309, 2023.",
|
| 420 |
+
"url": null
|
| 421 |
+
}
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"33": {
|
| 425 |
+
"title": "Learnability, stability and uniform convergence.",
|
| 426 |
+
"author": "S. Shalev-Shwartz, O. Shamir, N. Srebro, and K. Sridharan.",
|
| 427 |
+
"venue": "J. Mach. Learn. Res., 11:2635\u20132670, 2010.",
|
| 428 |
+
"url": null
|
| 429 |
+
}
|
| 430 |
+
},
|
| 431 |
+
{
|
| 432 |
+
"34": {
|
| 433 |
+
"title": "Lectures on Stochastic Programming: Modeling and\nTheory.",
|
| 434 |
+
"author": "A. Shapiro, D. Dentcheva, and A. Ruszczy\u0144ski.",
|
| 435 |
+
"venue": "SIAM, Philadelphia, PA, 3rd edition, 2021.",
|
| 436 |
+
"url": null
|
| 437 |
+
}
|
| 438 |
+
}
|
| 439 |
+
],
|
| 440 |
+
"url": "http://arxiv.org/html/2408.10384v1"
|
| 441 |
+
}
|
20240819/2408.10414v1.json
ADDED
|
@@ -0,0 +1,257 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Towards Automation of Human Stage of Decay Identification: An Artificial Intelligence Approach",
|
| 3 |
+
"abstract": "Determining the stage of decomposition (SOD) is crucial for estimating the postmortem interval and identifying human remains. Currently, labor-intensive manual scoring methods are used for this purpose, but they are subjective and do not scale for the emerging large-scale archival collections of human decomposition photos. This study explores the feasibility of automating two common human decomposition scoring methods proposed by Megyesi and Gelderman using artificial intelligence (AI). We evaluated two popular deep learning models, Inception V3 and Xception, by training them on a large dataset of human decomposition images to classify the SOD for different anatomical regions, including the head, torso, and limbs. Additionally, an interrater study was conducted to assess the reliability of the AI models compared to human forensic examiners for SOD identification. The Xception model achieved the best classification performance, with macro-averaged F1 scores of .878, .881, and .702 for the head, torso, and limbs when predicting Megyesi\u2019s SODs, and .872, .875, and .76 for the head, torso, and limbs when predicting Gelderman\u2019s SODs. The interrater study results supported AI\u2019s ability to determine the SOD at a reliability level comparable to a human expert. This work demonstrates the potential of AI models trained on a large dataset of human decomposition images to automate SOD identification.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Determining the stage of decay (SOD) is an important and common task in human remains cases. Knowing the degree of decomposition is vital for estimating the postmortem interval (PMI) and identifying human remains [1 ###reference_b1###, 2 ###reference_b2###, 3 ###reference_b3###, 4 ###reference_b4###]. Presently, establishing the SOD of a decedent is primarily conducted manually, via visual assessment by trained experts using non-metric scoring or staging methods, such as those proposed by Megyesi et al. [1 ###reference_b1###] and Gelderman et al. [2 ###reference_b2###]. Such non-metric methods, which rely on subjective interpretation made by humans, possess a higher susceptibility to human bias and error, consequently affecting the accuracy of downstream tasks, such as estimating the PMI [5 ###reference_b5###, 6 ###reference_b6###, 7 ###reference_b7###, 8 ###reference_b8###]. Furthermore, the PMI estimation formulas derived in existing studies, such as those by Megyesi et al. [1 ###reference_b1###] and Gelderman et al. [2 ###reference_b2###], were developed using a very small number of samples. Evaluating or improving upon these formulas with a much larger sample size, such as over one million photos, would require manual SOD scoring, which is not feasible for such a large sample. Therefore, this work aims to utilize emerging artificial intelligence (AI) methods to evaluate the feasibility of automating the SOD identification task.\nAI is the ability of machines to perform tasks that would typically require human intelligence and it has provided innovative approaches to assist in human decision-making [9 ###reference_b9###, 10 ###reference_b10###]. AI assesses information based on the entirety of acquired facts or data using advanced algorithms, thereby mitigating vulnerability to the subjectivity and biases that trouble humans and affect their decision-making abilities [11 ###reference_b11###]. Additionally, AI algorithms can handle large volumes of data, uncovering intricate patterns that might elude human perception [9 ###reference_b9###, 12 ###reference_b12###, 13 ###reference_b13###]. This ability to process, analyze, and interpret large amounts of data quickly and precisely makes AI a valuable tool in many industries, including forensic practice and research.\nIn summary, the objective of this study is to evaluate the possibility of automating two established human decomposition scoring methods, namely Megyesi et al. [1 ###reference_b1###] and Gelderman et al. [2 ###reference_b2###], using vision-based AI models, known as convolutional neural networks (CNNs). Specifically, various CNN classification models will be trained and evaluated on a large human decomposition image dataset to perform SOD prediction. In addition, an interrater test is conducted to assess and compare the reliability of the models and the human forensic examiners for SOD identification. We hypothesize that similar interrater reliability among human raters and an AI rater suggests the feasibility of using AI for SOD classification and, perhaps, other downstream tasks. The significance of such a finding lies in the potential to develop more accurate SOD and PMI estimation methods that are less effort-intensive and subjective. The primary purpose of this study is to provide a proof-of-concept for the future advancement and integration of AI-assisted analysis in forensic practice and research."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Materials and methods",
|
| 15 |
+
"text": ""
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Stage of decay scoring methods",
|
| 21 |
+
"text": "The two human decomposition scoring methods this study attempts to automate using AI are: (1) Megyesi et al. [1 ###reference_b1###] and (2) Gelderman et al. [2 ###reference_b2###]. To account for the differential decomposition that occurs in different body segments (e.g., limbs do not bloat or purge fluid), these two scoring methods independently assess the human body in three anatomical regions: (1) the head (including the neck), (2) the torso, and (3) the limbs (including the hands and feet). Based on the morphological features present, Megyesi et al. [1 ###reference_b1###] categorizes human decomposition into four high-level linear stages: fresh, early decomposition, advanced decomposition, and skeletonization. Gelderman et al. [2 ###reference_b2###], building upon the work of Megyesi et al. [1 ###reference_b1###], categorizes each anatomical region into six stages, with the lowest indicating no visible changes and the highest indicating complete skeletonization."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "The human decomposition dataset",
|
| 27 |
+
"text": "The human decomposition dataset, a large-scale image collection used to train the models, includes images of decomposing corpses donated to [removed for double anonymized review]. The center houses [removed for double anonymized review]. Forensic experts from the [removed for double anonymized review] captured these images at non-uniform intervals, with one or more days between each capture. The images, taken from various angles, depict different anatomical areas to illustrate the various stages and regions of human decomposition. The image resolutions vary from 2400\u00d71600 pixels up to 4900\u00d73200 pixels. The dataset covers the period from 2011 to 2023 and comprises over 1.5 million images contributed by more than 800 donors. To train different CNN classifiers on this large human decomposition image dataset to predict the SOD for various anatomical regions, the following challenges needed to be addressed:\nHow to best sample from the entire human decomposition dataset such that the resulting set of images reflects the dataset\u2019s temporal characteristics. In other words, the data used to train the models should consist of images covering the entire human decay process, that is, from death to skeletonization.\nOnce a set of images has been sampled, the challenge is to automate the efficient extraction of specific anatomical regions (i.e., head, torso, and limbs). From a time- and cost-effective perspective, it is not feasible to manually perform this body part filtering of over one million images.\nThe following section (Section 2.3 ###reference_###) details how these analysis challenges were addressed during data preparation."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.3",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "Data processing and labeling",
|
| 33 |
+
"text": "The human decomposition dataset was processed according to the data pipeline shown in Figure 1 ###reference_###. The remainder of this section further discusses the individual steps of this pipeline.\n###figure_1### The quality and size of the data used to train the models highly affect a model\u2019s performance and generalizability. In other words, the more representative and diverse the training data is, the more likely it is that the model will be able to generalize. In the case of a temporal dataset, such as images documenting human decomposition, where the subjects\u2019 appearance changes over time, it is important to sample the training data in a way that reflects the dataset\u2019s characteristics. Therefore, images depicting all possible decomposition stages should be included in the training data. As a result, a small sample of donors, and all images of those donors, was selected over time (i.e., from when they first started to decay until fully decomposed), instead of randomly selecting images from the entire human decomposition dataset. Since this subset of images included all different kinds of anatomical areas, a previously developed body part classification model was used to automatically detect the head, torso, and limbs images to align with Megyesi et al.\u2019s [1 ###reference_b1###] and Gelderman et al.\u2019s [2 ###reference_b2###] scoring methods. The next step was to manually label this subset of images with the SOD labels. Specifically, the two scoring methods, described in Section 2.1 ###reference_###, were used by a forensic expert to perform manual data labeling of the head, torso, and limbs images. Note that any body part misclassifications and/or poor quality images were either corrected or removed at this point to ensure that the final labeled datasets only included high quality images of the head, torso, and limbs. Table 1 ###reference_### shows the SOD terms used in the original literature (i.e., Megyesi et al. [1 ###reference_b1###] and Gelderman et al. [2 ###reference_b2###]) and the corresponding new class labels used throughout this study. For instance, if the forensic expert, applying Megyesi et al.\u2019s [1 ###reference_b1###] method, determined the SOD of a head image to be fresh, then it was assigned the M-SOD1 class label.\nThe image labeling was conducted using an in-house developed data visualization and annotation software called [removed for double anonymized review]. The resulting labeled datasets are shown in Table 2 ###reference_###, each of which will be used to train a SOD classification model. For example, M-head-data will be used to train a model to predict the SOD (i.e., M-SOD1, M-SOD2, M-SOD3, or M-SOD4) of head images. Finally, each labeled dataset was split into a train and test set using a ratio of 80:20. The train set was used to train the model and the test set was used to evaluate the model once trained."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "2.4",
|
| 37 |
+
"parent_section_id": "2",
|
| 38 |
+
"section_name": "AI model development",
|
| 39 |
+
"text": "To build the SOD classifiers, transfer learning was applied, which aims to produce effective models by leveraging and exploiting previously acquired knowledge [14 ###reference_b14###]. In particular, two CNN architectures, including Inception V3 [15 ###reference_b15###] and Xception [16 ###reference_b16###], pre-trained on the ImageNet dataset [17 ###reference_b17###], were trained using the following two-step transfer learning process: (1) freeze all pre-trained convolutional layers of the base model and train newly added classifier layers and (2) unfreeze all layers and fine-tune the model end-to-end with a low learning rate. The newly added classifier layers consisted of five layers, including one global average pooling layer and one drop-out layer (rate = 0.3) to alleviate the over-fitting problem motivated by Lin et al. [18 ###reference_b18###], followed by two fully-connected layers with 128 and 64 nodes performing down-sampling, and one final softmax layer with the number of nodes equal to the number of classes for multi-class classification. More precisely, the softmax layer transforms the output of the previous layer into a probability distribution over all the classes as shown by the equation (1 ###reference_###), with the class having the highest probability being the final SOD prediction. In equation (1 ###reference_###), is the element of the input vector to the softmax function and the total number of classes. To increase the size and diversity of the data, a data augmentation layer was added after the input layer, performing random image flipping (horizontal and vertical) and rotation during model training. Figure 2 ###reference_### gives an overview of the developed SOD classification framework.\n###figure_2### The loss function used was Cross-Entropy loss, which takes the predicted probability distribution of the softmax layer and measures how well this distribution matches the true distribution. To minimize the Cross-Entropy loss function during training, the Adaptive Momentum Estimation (Adam) optimizer was employed, with a learning rate of 0.001 for the first step and 0.0001 for the second step in the two-step transfer learning process. It is worth noting that training the Inception V3 and Xception architecture from scratch (i.e., without transfer learning), transfer learning without freezing the base model (i.e., one-step transfer learning), and freezing only a certain number of base model layers instead of all followed by fine-tuning was also tested. However, the proposed two-step transfer learning process significantly improved model performances.\nTo evaluate the performance of the trained SOD classifier models, the confusion matrix on the test data was calculated per class, which summarizes a model\u2019s performance by comparing its predicted labels to its true labels. Specifically, the confusion matrix shows the number of correct predictions, such as the true positives (TP) and the true negatives (TN), as well as the number of incorrect predictions, such as the false positives (FP) and the false negatives (FN). The following two performance metrics were then calculated from the per-class confusion matrix: precision (2 ###reference_###) and recall (3 ###reference_###). 
Precision measures the accuracy of the positive predictions, while recall measures the completeness of the positive predictions.\nTo combine the per-class precision and recall metrics into a single model evaluation metric, the macro-averaged F1 score (mF1) (4 ###reference_###) was calculated and reported, which is the unweighted mean of the per-class F1 scores (5 ###reference_###)."
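As a concrete illustration, the following is a minimal sketch of the two-step transfer learning setup described above, using the Keras/TensorFlow stack the study reports. The head layout (global average pooling, dropout 0.3, dense layers of 128 and 64 nodes, softmax), the augmentations, and the learning rates (0.001, then 0.0001) follow the text; the ReLU activations, rotation factor, and training arrays (`x_train`, `y_train`, `x_val`, `y_val`) are illustrative assumptions, not the authors' exact code.

```python
# Minimal sketch of the two-step transfer learning setup (assumptions noted).
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

NUM_CLASSES = 4  # e.g., Megyesi et al.'s four stages M-SOD1..M-SOD4

# Data augmentation applied after the input layer, as described.
augmentation = keras.Sequential([
    layers.RandomFlip("horizontal_and_vertical"),
    layers.RandomRotation(0.1),  # rotation factor not stated in the text
])

# ImageNet-pretrained Xception base without its classification top.
base = keras.applications.Xception(
    weights="imagenet", include_top=False, input_shape=(299, 299, 3))

inputs = keras.Input(shape=(299, 299, 3))
x = augmentation(inputs)
x = base(x, training=False)  # keep BatchNorm statistics frozen
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dropout(0.3)(x)
x = layers.Dense(128, activation="relu")(x)  # activation is an assumption
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(NUM_CLASSES, activation="softmax")(x)
model = keras.Model(inputs, outputs)

# Step 1: freeze the pretrained base and train only the new classifier head.
base.trainable = False
model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),
              loss="sparse_categorical_crossentropy",  # integer labels assumed
              metrics=["accuracy"])
# model.fit(x_train, y_train, validation_data=(x_val, y_val), ...)

# Step 2: unfreeze all layers and fine-tune end-to-end at a low rate.
base.trainable = True
model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-4),
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
# model.fit(x_train, y_train, validation_data=(x_val, y_val), ...)
```

Recompiling after toggling `base.trainable` is required for the change to take effect; the much lower second-step learning rate keeps fine-tuning from destroying the pretrained features.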
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "2.5",
|
| 43 |
+
"parent_section_id": "2",
|
| 44 |
+
"section_name": "Interrater test",
|
| 45 |
+
"text": "To ensure that manual rating techniques are reliable, interrater reliability tests are often used to assess how similar the ratings are between two or more raters on the same set of data samples. In this study, it is also used to assess the reliability of the developed models. The interrater test involved multiple raters (both human and AI) labeling a set of images depicting the same anatomical region using Megyesi et al.\u2019s [1 ###reference_b1###] and Gelderman et al.\u2019s [2 ###reference_b2###] scoring methods. Specifically, 300 torso images that were not used during model development were selected. Due to limited resources, the interrater test focused on the torso only, which provides a good amount of variability. The raters included the developed torso SOD model or Model and three forensic experts well-versed in the considered scoring methods, including Human 1 (the same human who labeled the data the models were trained with), Human 2, and Human 3. The task for each rater was to label the 300 images using once Megyesi et al.\u2019s [1 ###reference_b1###] method and once Gelderman et al.\u2019s [2 ###reference_b2###] method. The Model rater performed labeling by predicting the SOD of the 300 images, while the human raters were instructed to independently label the 300 images on [removed for double anonymized review], following a similar set-up used for data labeling in Section 2.3 ###reference_###. To ensure randomization across methods, human labeling was conducted in batches of images instead of labeling all 300 images with one method and then the other. Specifically, the human raters were presented with batches of 50 images at a time, which they were asked to label using one scoring method (i.e., Megyesi et al. [1 ###reference_b1###] or Gelderman et al. [2 ###reference_b2###]). This process was repeated until all 300 images were labeled with both scoring methods, resulting in 12 iterations for each human rater ((300 images / 50 images) \u00d7 2 methods). Note, the method used to label a batch of images was alternated between the two scoring methods.\nAfter each rater completed labeling, two types of agreements were assessed: (1) human-human agreement (i.e., the agreement among all three human raters: Human 1, Human 2, and Human 3) and (2) AI-human agreement (i.e., the agreement among the Model replacing Human 1, and the other two human raters, Human 2, and Human 3). The reason for the Model replacing Human 1 was to see how the agreement changes when the human is replaced by the model trained on the data they labeled. To measure the different agreements, the Fleiss\u2019 Kappa statistic was used, which measures the degree of agreement among raters over what would be expected by random chance, with values ranging from -1 (no agreement) to 1 (perfect agreement). The Fleiss\u2019 Kappa values will be interpreted based on Landis and Kock\u2019s [19 ###reference_b19###] interpretation criteria shown in Table 3 ###reference_###."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Results",
|
| 51 |
+
"text": ""
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.1",
|
| 55 |
+
"parent_section_id": "3",
|
| 56 |
+
"section_name": "SOD classification",
|
| 57 |
+
"text": "The SOD classifier models were implemented using Keras and TensorFlow, two open-source machine learning modules written in Python. In particular, the models were built and evaluated on the six manually labeled datasets shown in Table 2 ###reference_###. All images were resized to 299\u00d7299 pixels, as that is the required input image size for both the Inception V3 and Xception architectures. All models were trained on a single Tesla V100\\hypSXM2 GPU with 32GB of memory. The batch size was set to 32, and the number of epochs was set to 200, with early stopping set to 20 epochs to avoid over-fitting on the training set.\nTable 4 ###reference_### presents the Megyesi et al. [1 ###reference_b1###] SOD classification results on the test data. For each anatomical region, the best SOD classification performance, as indicated by the mF1 scores, was achieved with the Xception architecture. The head SOD classifier achieved an mF1 score of .878, and the torso SOD classifier achieved an mF1 score of .881, performing comparably. However, the limbs SOD classifier performed notably less well, with an mF1 score of .702.\n###table_1### Table 5 ###reference_### presents the Gelderman et al. [2 ###reference_b2###] SOD classification results on the test data. Similar to the Megyesi et al. [1 ###reference_b1###] results, the best classification performance, as indicated by the mF1 scores, was achieved by the Xception architecture across all anatomical regions. The head SOD classifier achieved an mF1 score of .872, and the torso SOD classifier achieved an mF1 score of .875, performing comparably. However, the limbs SOD classifier performed less well, with an mF1 score of .76.\n###table_2###"
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "3.2",
|
| 61 |
+
"parent_section_id": "3",
|
| 62 |
+
"section_name": "Interrater test",
|
| 63 |
+
"text": "The interrater test results are shown in Table 6 ###reference_###. Specifically, a Fleiss\u2019 Kappa analysis using SPSS Statistics was conducted for both the human-human agreement and AI-human agreement across both scoring methods. The reported agreement levels were determined using Landis and Kock\u2019s [19 ###reference_b19###] interpretation criteria, as shown in Table 3 ###reference_###. According to the Megyesi et al. [1 ###reference_b1###] results, the Fleiss\u2019 Kappa coefficient of the human-human agreement was .67 with a p-value .001 and a 95% confidence interval (CI) of .628 to .713, indicating substantial agreement. Similarly, the Fleiss\u2019 Kappa coefficient of the AI-human agreement was .637 with a p-value .001 and a 95% CI of .594 to .68, suggesting substantial agreement. Additionally, according to the Gelderman et al. [2 ###reference_b2###] results, the Fleiss\u2019 Kappa coefficient of the human-human agreement was .593 with a p-value .001 and a 95% CI of .558 to .628, indicating moderate agreement. Similarly, the Fleiss\u2019 Kappa coefficient of the AI-human agreement was .558 with a p-value .001 and a 95% CI of .524 to .592, suggesting moderate agreement. In all cases, the Fleiss\u2019 Kappa coefficient was statistically significant (i.e., the p-value 0.05)."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "3.3",
|
| 67 |
+
"parent_section_id": "3",
|
| 68 |
+
"section_name": "Discussion",
|
| 69 |
+
"text": "Overall, the SOD classification results are promising. The Xception architecture performed the best across both scoring methods. The head and torso SOD models performed comparably well; however, the limbs SOD models\u2019 performances were not as strong. Further analysis of the limb data revealed that some images included hands and/or feet covered by a net (to prevent them from being scattered/disarticulated by animal scavengers), which could confuse and distract the model, leading to incorrect predictions. Additionally, the examination of the limbs data indicated that some images included other parts of the body, specifically the torso. Since the torso decays differently than the limbs, this inclusion could again confuse the model and, consequently, affect its predictions. Future work will focus on addressing these data quality challenges to ensure the development of a more reliable limbs dataset and hence improved prediction performance. Another important finding to mention is that although the Gelderman et al. [2 ###reference_b2###] datasets contained more SOD classes (six classes) than the Megyesi et al. [1 ###reference_b1###] datasets (four classes), the classification performances were comparable. This indicates that these AI models are able to learn a higher number of decay stages without decreasing prediction performance.\nWhile the SOD prediction performances were promising overall, there is room for improvement, as indicated by the per-class precision and recall values. The sizes of the labeled datasets used in this study are considered rather small for training deep learning architectures, such as Inception V3 and Xception. Additionally, a deeper analysis of the labeled datasets indicated class imbalance (i.e., a disproportionate number of instances of one class compared to another). Having a larger, more diverse, and evenly balanced dataset will make the models more robust and improve their generalization capabilities, which refers to how well a model can react to new and unseen data. However, creating more labeled data where domain expertise is required may be limited by both resource and time constraints. A recent study [removed for double anonymized review] addressed such challenges by developing a domain-aware label propagation algorithm that leverages different image attributes to automatically perform data labeling, thereby reducing manual labeling efforts and costs. Future work will explore integrating such label propagation methods to obtain larger and more diverse datasets, aiming to create more robust and accurate SOD classification models.\nIn the interrater test, both the human-human and AI-human agreements showed substantial agreement when applying the Megyesi et al. [1 ###reference_b1###] scoring method. Using the Gelderman et al. [2 ###reference_b2###] scoring method, moderate agreement was observed for both the human-human and AI-human agreements. Across both scoring methods, the level of agreement for both the human-human and AI-human agreements was the same. This means that when the human rater was replaced with the AI model rater, the level of reliability stayed the same, supporting AI\u2019s ability to perform SOD identification with a reliability level comparable to that of an experienced human forensic examiner. Notably, the lower agreement level for the Gelderman et al. 
[2 ###reference_b2###] method could be attributed to (1) its novelty in the field and/or (2) its complexity, being more complicated to apply, as it involves six decay stages compared to the four stages in the Megyesi et al. [1 ###reference_b1###] method.\nWhile the results demonstrate that human decomposition scoring methods have the potential to be automated using AI techniques, there are some important limitations of this work. For one, the data labeling was conducted by a single forensic expert. This approach may introduce labeling bias, which can lead to inherently biased training datasets. Models trained on such datasets can inherit these biases, resulting in biased models [20 ###reference_b20###]. Therefore, future work will focus on creating a so-called \u201cgold standard\u201d dataset\u2014a labeled dataset meticulously crafted and evaluated by multiple forensic experts. Such a dataset would be accepted as the most accurate and reliable of its kind. This step will ensure that the models are trained with accurate and unbiased data, which is vital for developing high-quality models.\nAn additional limitation is that this study is environmental- and climate-specific. The images used to train the models were all taken of donors decaying outdoors in an open-wooded area with the ground consisting of soil, gravel, and dead/decaying plant matter (e.g., rotting wood and shedding leaves). Additionally, the climate of this area is humid subtropical, characterized by high summer and moderate winter temperatures. Therefore, the models may not perform as well on images taken in different climate conditions or environments, necessitating additional training or re-training with images specific to those conditions."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "Conclusion",
|
| 75 |
+
"text": "This study explored the possibility of automating two common human decomposition scoring methods, namely Megyesi et al. [1 ###reference_b1###] and Gelderman et al. [2 ###reference_b2###]. Specifically, different CNN models, including Inception V3 and Xception, were trained on a large human decomposition image dataset to classify the SOD for different anatomical regions. Across both scoring methods, the Xception model achieved the highest classification results, performing comparably well for the head and torso, and slightly lower for the limbs. The interrater reliability study results provided support for AI\u2019s ability to automate the SOD identification task at a reliability level comparable to a human expert. Overall, the study results are promising and provide a proof-of-concept for automating human decomposition scoring methods using AI."
|
| 76 |
+
}
|
| 77 |
+
],
|
| 78 |
+
"appendix": [],
|
| 79 |
+
"tables": {
|
| 80 |
+
"1": {
|
| 81 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S2.T1\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S2.T1.2\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S2.T1.2.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S2.T1.2.1.1.1\">Method</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S2.T1.2.1.1.2\">Original SOD Term</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S2.T1.2.1.1.3\">New SOD Class Label</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S2.T1.2.2.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T1.2.2.1.1\" rowspan=\"4\"><span class=\"ltx_text\" id=\"S2.T1.2.2.1.1.1\">Megyesi et al.\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10414v1#bib.bib1\" title=\"\">1</a>]</cite></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T1.2.2.1.2\">fresh</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T1.2.2.1.3\">M-SOD1</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.2.3.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.3.2.1\">early decomposition</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.3.2.2\">M-SOD2</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.2.4.3\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.4.3.1\">advanced decomposition</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.4.3.2\">M-SOD3</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.2.5.4\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.5.4.1\">skeletonization</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.5.4.2\">M-SOD4</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.2.6.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S2.T1.2.6.5.1\" rowspan=\"6\"><span class=\"ltx_text\" id=\"S2.T1.2.6.5.1.1\">Gelderman et al.\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10414v1#bib.bib2\" title=\"\">2</a>]</cite></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.2.6.5.2\">1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.2.6.5.3\">G-SOD1</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.2.7.6\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.7.6.1\">2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.7.6.2\">G-SOD2</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.2.8.7\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.8.7.1\">3</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.8.7.2\">G-SOD3</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.2.9.8\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.9.8.1\">4</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.9.8.2\">G-SOD4</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.2.10.9\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.10.9.1\">5</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.2.10.9.2\">G-SOD5</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.2.11.10\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S2.T1.2.11.10.1\">6</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S2.T1.2.11.10.2\">G-SOD6</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S2.T1.3.1.1\" style=\"font-size:90%;\">Table 1</span>: </span><span class=\"ltx_text\" id=\"S2.T1.4.2\" style=\"font-size:90%;\">The original SOD terms mapped to the new SOD 
class labels used throughout this study for the different scoring methods.</span></figcaption>\n</figure>",
|
| 82 |
+
"capture": "Table 1: The original SOD terms mapped to the new SOD class labels used throughout this study for the different scoring methods."
|
| 83 |
+
},
|
| 84 |
+
"2": {
|
| 85 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S2.T2\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S2.T2.2\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S2.T2.2.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S2.T2.2.1.1.1\">Method</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S2.T2.2.1.1.2\">Anatomical Region</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S2.T2.2.1.1.3\">Dataset</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S2.T2.2.1.1.4\">NoL</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S2.T2.2.2.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T2.2.2.1.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S2.T2.2.2.1.1.1\">Megyesi et al.\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10414v1#bib.bib1\" title=\"\">1</a>]</cite></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T2.2.2.1.2\">head</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T2.2.2.1.3\">M-head-data</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T2.2.2.1.4\">2110</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.2.3.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T2.2.3.2.1\">torso</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T2.2.3.2.2\">M-torso-data</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T2.2.3.2.3\">1979</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.2.4.3\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T2.2.4.3.1\">limbs</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T2.2.4.3.2\">M-limbs-data</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T2.2.4.3.3\">2152</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.2.5.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S2.T2.2.5.4.1\" rowspan=\"3\"><span class=\"ltx_text\" id=\"S2.T2.2.5.4.1.1\">Gelderman et al.\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10414v1#bib.bib2\" title=\"\">2</a>]</cite></span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T2.2.5.4.2\">head</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T2.2.5.4.3\">G-head-data</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T2.2.5.4.4\">2041</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.2.6.5\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T2.2.6.5.1\">torso</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T2.2.6.5.2\">G-torso-data</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T2.2.6.5.3\">1982</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T2.2.7.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S2.T2.2.7.6.1\">limbs</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S2.T2.2.7.6.2\">G-limbs-data</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S2.T2.2.7.6.3\">2032</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S2.T2.3.1.1\" style=\"font-size:90%;\">Table 2</span>: </span><span class=\"ltx_text\" id=\"S2.T2.4.2\" style=\"font-size:90%;\">The labeled datasets used to train the SOD classifier models. Method indicates the SOD scoring method used to label the different anatomical regions. NoL gives the number of labeled images for the particular dataset.</span></figcaption>\n</figure>",
|
| 86 |
+
"capture": "Table 2: The labeled datasets used to train the SOD classifier models. Method indicates the SOD scoring method used to label the different anatomical regions. NoL gives the number of labeled images for the particular dataset."
|
| 87 |
+
},
|
| 88 |
+
"3": {
|
| 89 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S2.T3\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S2.T3.6\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S2.T3.6.7.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row\" id=\"S2.T3.6.7.1.1\">Fleiss\u2019 Kappa</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S2.T3.6.7.1.2\">Level of Agreement</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S2.T3.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_tt\" id=\"S2.T3.1.1.1\">\n 0.8</th>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S2.T3.1.1.2\">almost perfect</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T3.2.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row\" id=\"S2.T3.2.2.1\">\n 0.6</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T3.2.2.2\">substantial</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T3.3.3\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row\" id=\"S2.T3.3.3.1\">\n 0.4</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T3.3.3.2\">moderate</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T3.4.4\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row\" id=\"S2.T3.4.4.1\">\n 0.2</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T3.4.4.2\">fair</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T3.5.5\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row\" id=\"S2.T3.5.5.1\">\n 0</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T3.5.5.2\">slight</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T3.6.6\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb\" id=\"S2.T3.6.6.1\">\n 0</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S2.T3.6.6.2\">no agreement</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S2.T3.8.1.1\" style=\"font-size:90%;\">Table 3</span>: </span><span class=\"ltx_text\" id=\"S2.T3.9.2\" style=\"font-size:90%;\">Landis and Kock\u2019s\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10414v1#bib.bib19\" title=\"\">19</a>]</cite> Fleiss\u2019 Kappa interpretation criteria.</span></figcaption>\n</figure>",
"capture": "Table 3: Landis and Kock\u2019s\u00a0[19] Fleiss\u2019 Kappa interpretation criteria."
},
"4": {
"table_html": "<figure class=\"ltx_table\" id=\"S3.T4\">\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S3.T4.2\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T4.2.1.1\">\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.1.1.1\" rowspan=\"3\" style=\"padding-left:7.0pt;padding-right:7.0pt;\"><span class=\"ltx_text\" id=\"S3.T4.2.1.1.1.1\">Archt.</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.1.1.2\" rowspan=\"3\" style=\"padding-left:7.0pt;padding-right:7.0pt;\"><span class=\"ltx_text\" id=\"S3.T4.2.1.1.2.1\">Region</span></td>\n<td class=\"ltx_td ltx_align_center\" colspan=\"8\" id=\"S3.T4.2.1.1.3\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">Per-Class Precision (P) & Recall (R)</td>\n<td class=\"ltx_td\" id=\"S3.T4.2.1.1.4\" style=\"padding-left:7.0pt;padding-right:7.0pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T4.2.2.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"2\" id=\"S3.T4.2.2.2.1\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">M-SOD1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"2\" id=\"S3.T4.2.2.2.2\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">M-SOD2</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"2\" id=\"S3.T4.2.2.2.3\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">M-SOD3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"2\" id=\"S3.T4.2.2.2.4\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">M-SOD4</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.2.2.5\" rowspan=\"2\" style=\"padding-left:7.0pt;padding-right:7.0pt;\"><span class=\"ltx_text\" id=\"S3.T4.2.2.2.5.1\">mF1</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T4.2.3.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.3.3.1\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">P</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.3.3.2\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">R</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.3.3.3\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">P</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.3.3.4\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">R</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.3.3.5\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">P</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.3.3.6\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">R</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.3.3.7\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">P</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.3.3.8\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">R</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T4.2.4.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T4.2.4.4.1\" rowspan=\"3\" style=\"padding-left:7.0pt;padding-right:7.0pt;\"><span class=\"ltx_text\" id=\"S3.T4.2.4.4.1.1\">InceptionV3</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T4.2.4.4.2\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">head</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T4.2.4.4.3\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.846</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T4.2.4.4.4\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.805</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T4.2.4.4.5\" 
style=\"padding-left:7.0pt;padding-right:7.0pt;\">.835</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T4.2.4.4.6\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.91</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T4.2.4.4.7\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.698</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T4.2.4.4.8\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.677</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T4.2.4.4.9\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.882</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T4.2.4.4.10\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.778</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T4.2.4.4.11\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.806</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T4.2.5.5\">\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.5.5.1\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">torso</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.5.5.2\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.5.5.3\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.5.5.4\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.936</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.5.5.5\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.83</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.5.5.6\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.683</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.5.5.7\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.707</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.5.5.8\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.733</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.5.5.9\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.892</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.5.5.10\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.845</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T4.2.6.6\">\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.6.6.1\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">limbs</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.6.6.2\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.667</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.6.6.3\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.333</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.6.6.4\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.94</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.6.6.5\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.94</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.6.6.6\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.467</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.6.6.7\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.5</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.6.6.8\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.897</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.6.6.9\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.929</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.6.6.10\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.695</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T4.2.7.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S3.T4.2.7.7.1\" rowspan=\"3\" 
style=\"padding-left:7.0pt;padding-right:7.0pt;\"><span class=\"ltx_text\" id=\"S3.T4.2.7.7.1.1\">Xception</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.7.7.2\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">head</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.7.7.3\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.932</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.7.7.4\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.842</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.7.7.5\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.878</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.7.7.6\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.966</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.7.7.7\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.887</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.7.7.8\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.723</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.7.7.9\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.893</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.7.7.10\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.926</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T4.2.7.7.11\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.878</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T4.2.8.8\">\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.8.8.1\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">torso</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.8.8.2\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.8.8.3\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.8.8.4\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.926</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.8.8.5\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.898</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.8.8.6\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.754</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.8.8.7\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.767</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.8.8.8\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.829</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.8.8.9\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.872</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T4.2.8.8.10\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.881</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T4.2.9.9\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T4.2.9.9.1\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">limbs</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T4.2.9.9.2\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.75</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T4.2.9.9.3\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T4.2.9.9.4\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.924</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T4.2.9.9.5\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.978</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T4.2.9.9.6\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.6</td>\n<td 
class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T4.2.9.9.7\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.214</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T4.2.9.9.8\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.92</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T4.2.9.9.9\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.967</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T4.2.9.9.10\" style=\"padding-left:7.0pt;padding-right:7.0pt;\">.702</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S3.T4.3.1.1\" style=\"font-size:90%;\">Table 4</span>: </span><span class=\"ltx_text\" id=\"S3.T4.4.2\" style=\"font-size:90%;\">Megyesi et al.\u2019s\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10414v1#bib.bib1\" title=\"\">1</a>]</cite> SOD classification results.</span></figcaption>\n</figure>",
"capture": "Table 4: Megyesi et al.\u2019s\u00a0[1] SOD classification results."
},
"5": {
"table_html": "<figure class=\"ltx_table\" id=\"S3.T5\">\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S3.T5.2\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T5.2.1.1\">\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.1.1.1\" rowspan=\"3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\"><span class=\"ltx_text\" id=\"S3.T5.2.1.1.1.1\">Archt.</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.1.1.2\" rowspan=\"3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\"><span class=\"ltx_text\" id=\"S3.T5.2.1.1.2.1\">Region</span></td>\n<td class=\"ltx_td ltx_align_center\" colspan=\"12\" id=\"S3.T5.2.1.1.3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">Per-Class Precision (P) & Recall (R)</td>\n<td class=\"ltx_td\" id=\"S3.T5.2.1.1.4\" style=\"padding-left:2.4pt;padding-right:2.4pt;\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T5.2.2.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"2\" id=\"S3.T5.2.2.2.1\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">G-SOD1</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"2\" id=\"S3.T5.2.2.2.2\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">G-SOD2</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"2\" id=\"S3.T5.2.2.2.3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">G-SOD3</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"2\" id=\"S3.T5.2.2.2.4\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">G-SOD4</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"2\" id=\"S3.T5.2.2.2.5\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">G-SOD5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" colspan=\"2\" id=\"S3.T5.2.2.2.6\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">G-SOD6</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.2.2.7\" rowspan=\"2\" style=\"padding-left:2.4pt;padding-right:2.4pt;\"><span class=\"ltx_text\" id=\"S3.T5.2.2.2.7.1\">mF1</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T5.2.3.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.3.3.1\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">P</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.3.3.2\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">R</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.3.3.3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">P</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.3.3.4\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">R</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.3.3.5\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">P</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.3.3.6\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">R</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.3.3.7\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">P</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.3.3.8\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">R</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.3.3.9\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">P</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.3.3.10\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">R</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.3.3.11\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">P</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.3.3.12\" 
style=\"padding-left:2.4pt;padding-right:2.4pt;\">R</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T5.2.4.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.1\" rowspan=\"3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\"><span class=\"ltx_text\" id=\"S3.T5.2.4.4.1.1\">InceptionV3</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.2\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">head</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.4\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.8</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.5\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.903</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.6\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.933</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.7\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.946</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.8\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.907</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.9\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.8</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.10\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.8</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.11\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.696</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.12\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.8</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.13\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.895</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.14\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.944</td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T5.2.4.4.15\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.866</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T5.2.5.5\">\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.1\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">torso</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.2\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.2</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.5</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.4\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.862</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.5\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.862</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.6\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.954</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.7\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.954</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.8\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.897</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.9\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.833</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.10\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.7</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.11\" 
style=\"padding-left:2.4pt;padding-right:2.4pt;\">.824</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.12\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.833</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.13\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.714</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.5.5.14\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.749</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T5.2.6.6\">\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.1\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">limbs</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.2\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.75</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.75</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.4\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.704</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.5\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.6\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.907</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.7\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.739</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.8\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.696</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.9\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.765</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.10\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.512</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.11\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.12\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.13\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">0.059</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.6.6.14\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.651</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T5.2.7.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_t\" id=\"S3.T5.2.7.7.1\" rowspan=\"3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\"><span class=\"ltx_text\" id=\"S3.T5.2.7.7.1.1\">Xception</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.2\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">head</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.4\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.6</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.5\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.882</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.6\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.7\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.947</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.8\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.918</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.9\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.806</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" 
id=\"S3.T5.2.7.7.10\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.829</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.11\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.818</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.12\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.9</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.13\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.14\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.889</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T5.2.7.7.15\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.872</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T5.2.8.8\">\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.1\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">torso</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.2\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.667</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.4\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.964</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.5\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.931</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.6\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.974</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.7\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.862</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.8\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.792</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.9\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.10\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.789</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.11\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.81</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.12\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.944</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.13\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.81</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T5.2.8.8.14\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.875</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T5.2.9.9\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.1\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">limbs</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.2\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.3\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.4\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.875</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.5\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.947</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.6\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.909</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.7\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.87</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.8\" 
style=\"padding-left:2.4pt;padding-right:2.4pt;\">.75</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.9\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.824</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.10\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.826</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.11\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.905</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.12\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">1.0</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.13\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">0.071</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T5.2.9.9.14\" style=\"padding-left:2.4pt;padding-right:2.4pt;\">.76</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S3.T5.3.1.1\" style=\"font-size:90%;\">Table 5</span>: </span><span class=\"ltx_text\" id=\"S3.T5.4.2\" style=\"font-size:90%;\">Gelderman et al.\u2019s\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10414v1#bib.bib2\" title=\"\">2</a>]</cite> SOD classification results.</span></figcaption>\n</figure>",
"capture": "Table 5: Gelderman et al.\u2019s\u00a0[2] SOD classification results."
},
"6": {
"table_html": "<figure class=\"ltx_table\" id=\"S3.T6\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S3.T6.4\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T6.4.5.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S3.T6.4.5.1.1\" rowspan=\"2\" style=\"padding-left:4.0pt;padding-right:4.0pt;\"><span class=\"ltx_text\" id=\"S3.T6.4.5.1.1.1\">Method</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S3.T6.4.5.1.2\" rowspan=\"2\" style=\"padding-left:4.0pt;padding-right:4.0pt;\"><span class=\"ltx_text\" id=\"S3.T6.4.5.1.2.1\">Agreement</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S3.T6.4.5.1.3\" rowspan=\"2\" style=\"padding-left:4.0pt;padding-right:4.0pt;\"><span class=\"ltx_text\" id=\"S3.T6.4.5.1.3.1\">Fleiss\u2019 Kappa</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S3.T6.4.5.1.4\" rowspan=\"2\" style=\"padding-left:4.0pt;padding-right:4.0pt;\"><span class=\"ltx_text\" id=\"S3.T6.4.5.1.4.1\">P-value</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" colspan=\"2\" id=\"S3.T6.4.5.1.5\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">95% CI</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S3.T6.4.5.1.6\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">Agreement</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T6.4.6.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T6.4.6.2.1\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">UB</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T6.4.6.2.2\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">LB</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T6.4.6.2.3\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">Level</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T6.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T6.1.1.2\" rowspan=\"2\" style=\"padding-left:4.0pt;padding-right:4.0pt;\"><span class=\"ltx_text\" id=\"S3.T6.1.1.2.1\">Megyesi et al.\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10414v1#bib.bib1\" title=\"\">1</a>]</cite></span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T6.1.1.3\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">human-human</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T6.1.1.4\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">.67</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T6.1.1.1\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">\n.001</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T6.1.1.5\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">.628</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T6.1.1.6\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">.713</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S3.T6.1.1.7\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">substantial</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T6.2.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T6.2.2.2\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">AI-human</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T6.2.2.3\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">.637</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T6.2.2.1\" 
style=\"padding-left:4.0pt;padding-right:4.0pt;\">\n.001</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T6.2.2.4\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">.594</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T6.2.2.5\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">.68</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S3.T6.2.2.6\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">substantial</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T6.3.3\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_bb ltx_border_t\" id=\"S3.T6.3.3.2\" rowspan=\"2\" style=\"padding-left:4.0pt;padding-right:4.0pt;\"><span class=\"ltx_text\" id=\"S3.T6.3.3.2.1\">Gelderman et al.\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10414v1#bib.bib2\" title=\"\">2</a>]</cite></span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S3.T6.3.3.3\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">human-human</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S3.T6.3.3.4\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">.593</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S3.T6.3.3.1\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">\n.001</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S3.T6.3.3.5\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">.558</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S3.T6.3.3.6\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">.628</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S3.T6.3.3.7\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">moderate</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T6.4.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T6.4.4.2\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">AI-human</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T6.4.4.3\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">.558</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T6.4.4.1\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">\n.001</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T6.4.4.4\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">.524</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T6.4.4.5\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">.592</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S3.T6.4.4.6\" style=\"padding-left:4.0pt;padding-right:4.0pt;\">moderate</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S3.T6.6.1.1\" style=\"font-size:90%;\">Table 6</span>: </span><span class=\"ltx_text\" id=\"S3.T6.7.2\" style=\"font-size:90%;\">The interrater study results. Reported are the Fleiss\u2019 kappa value, the p-value, the upper bound (UB) and lower bound (LB) of the 95% confidence interval (CI), and the agreement level for both the human-human and AI-human agreement across both scoring methods. 
The agreement levels are determined using Landis and Kock\u2019s\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10414v1#bib.bib19\" title=\"\">19</a>]</cite> interpretation criteria, as shown in Table\u00a0<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2408.10414v1#S2.T3\" title=\"Table 3 \u2023 2.5 Interrater test \u2023 2 Materials and methods \u2023 Towards Automation of Human Stage of Decay Identification: An Artificial Intelligence Approach\"><span class=\"ltx_text ltx_ref_tag\">3</span></a>.</span></figcaption>\n</figure>",
"capture": "Table 6: The interrater study results. Reported are the Fleiss\u2019 kappa value, the p-value, the upper bound (UB) and lower bound (LB) of the 95% confidence interval (CI), and the agreement level for both the human-human and AI-human agreement across both scoring methods. The agreement levels are determined using Landis and Kock\u2019s\u00a0[19] interpretation criteria, as shown in Table\u00a03."
}
},
"image_paths": {
"1": {
"figure_path": "2408.10414v1_figure_1.png",
"caption": "Figure 1: The data processing pipeline.",
"url": "http://arxiv.org/html/2408.10414v1/extracted/5800992/data_processing_pipeline.png"
},
"2": {
"figure_path": "2408.10414v1_figure_2.png",
"caption": "Figure 2: The SOD classification framework was trained using a two-step transfer learning approach. In step 1, the pre-trained convolutional layers of the base model (blue rectangle) were frozen, and only the newly added classifier layers (yellow rectangle) were trained. In step 2, the previously frozen layers from step 1 (blue rectangle) were unfrozen, and the entire model was trained end-to-end.",
"url": "http://arxiv.org/html/2408.10414v1/extracted/5800992/framework.png"
}
},
"validation": true,
"references": [
{
"1": {
"title": "doi:10.1.1/jpb001.",
"author": "M. S. Megyesi, S. P. Nawrocki, N. H. Haskell, Using accumulated degree-days to estimate the postmortem interval from decomposed human remains, Journal of Forensic Science 50 (3) (2005) 618\u2013626.",
"venue": null,
"url": "https://doi.org/10.1.1/jpb001"
}
},
{
"2": {
"title": "doi:10.1007/s00414-017-1700-9.",
"author": "H. Gelderman, L. Boer, T. Naujocks, A. IJzermans, W. Duijst, The development of a post-mortem interval estimation for human remains found on land in the netherlands, International journal of legal medicine 132 (3) (2018) 863\u2013873.",
"venue": null,
"url": "https://doi.org/10.1007/s00414-017-1700-9"
}
},
{
"3": {
"title": "doi:10.1016/j.forsciint.2010.04.052.",
"author": "A. A. Vass, The elusive universal post-mortem interval formula, Forensic science international 204 (1-3) (2011) 34\u201340.",
"venue": null,
"url": "https://doi.org/10.1016/j.forsciint.2010.04.052"
}
},
{
"4": {
"title": "doi:10.1520/JFS12680J.",
"author": "A. Galloway, W. H. Birkby, A. M. Jones, T. E. Henry, B. O. Parks, Decay rates of human remains in an arid environment, Journal of forensic sciences 34 (3) (1989) 607\u2013616.",
"venue": null,
"url": "https://doi.org/10.1520/JFS12680J"
}
},
{
"5": {
"title": "doi:10.1016/j.scijus.2013.11.003.",
"author": "S. Nakhaeizadeh, I. E. Dror, R. M. Morgan, Cognitive bias in forensic anthropology: visual assessment of skeletal remains is susceptible to confirmation bias, Science & Justice 54 (3) (2014) 208\u2013214.",
"venue": null,
"url": "https://doi.org/10.1016/j.scijus.2013.11.003"
}
},
{
"6": {
"title": "doi:10.1016/j.forsciint.2019.01.016.",
"author": "G. S. Cooper, V. Meterko, Cognitive bias research in forensic science: a systematic review, Forensic science international 297 (2019) 35\u201346.",
"venue": null,
"url": "https://doi.org/10.1016/j.forsciint.2019.01.016"
}
},
{
"7": {
"title": "doi:10.1016/j.jarmac.2017.09.001.",
"author": "J. Kukucka, S. M. Kassin, P. A. Zapf, I. E. Dror, Cognitive bias and blindness: a global survey of forensic science examiners., Journal of applied research in memory and cognition 6 (4) (2017) 452\u2013459.",
"venue": null,
"url": "https://doi.org/10.1016/j.jarmac.2017.09.001"
}
},
{
"8": {
"title": "doi:10.1016/j.bushor.2018.03.007.",
"author": "M. H. Jarrahi, Artificial intelligence and the future of work: human-ai symbiosis in organizational decision making, Business horizons 61 (4) (2018) 577\u2013586.",
"venue": null,
"url": "https://doi.org/10.1016/j.bushor.2018.03.007"
}
},
{
"9": {
"title": "doi:10.3390/diagnostics13182992.",
"author": "A.-I. Piraianu, A. Fulga, C. L. Musat, O.-R. Ciobotaru, D. G. Poalelungi, E. Stamate, O. Ciobotaru, I. Fulga, Enhancing the evidence with algorithms: how artificial intelligence is transforming forensic medicine, Diagnostics 13 (18) (2023) 2992\u20133003.",
"venue": null,
"url": "https://doi.org/10.3390/diagnostics13182992"
}
},
{
"10": {
"title": "doi:10.7717/peerj-cs.488.",
"author": "A. M. Rahmani, E. Azhir, S. Ali, M. Mohammadi, O. H. Ahmed, M. Y. Ghafour, S. H. Ahmed, M. Hosseinzadeh, Artificial intelligence approaches and mechanisms for big data analytics: a systematic study, PeerJ computer science 7 (2021) 1\u201328.",
"venue": null,
"url": "https://doi.org/10.7717/peerj-cs.488"
}
},
{
"11": {
"title": "doi:10.3389/frai.2021.622364.",
"author": "J. H. Korteling, G. C. van de Boer-Visschedijk, R. A. Blankendaal, R. C. Boonekamp, A. R. Eikelboom, Human-versus artificial intelligence, Frontiers in artificial intelligence 4 (2021) 1\u201313.",
"venue": null,
"url": "https://doi.org/10.3389/frai.2021.622364"
}
},
{
"12": {
"title": "doi:10.1109/TKDE.2009.191.",
"author": "S. J. Pan, Q. Yang, A survey on transfer learning, IEEE Transactions on knowledge and data engineering 22 (10) (2010) 1345\u20131359.",
"venue": null,
"url": "https://doi.org/10.1109/TKDE.2009.191"
}
},
{
"13": {
"title": "doi:10.1109/CVPR.2016.308.",
"author": "C. Szegedy, V. Vanhoucke, S. Ioffe, J. Shlens, Z. Wojna, Rethinking the inception architecture for computer vision, in: IEEE conference on computer vision and pattern recognition, 2016, pp. 2818\u20132826.",
"venue": null,
"url": "https://doi.org/10.1109/CVPR.2016.308"
}
},
{
"14": {
"title": "doi:10.48550/arXiv.1610.02357.",
"author": "F. Chollet, Xception: deep learning with depthwise separable convolutions, in: IEEE conference on computer vision and pattern recognition, 2017, pp. 1251\u20131258.",
"venue": null,
"url": "https://doi.org/10.48550/arXiv.1610.02357"
}
},
{
"15": {
"title": "doi:10.1109/CVPR.2009.5206848.",
"author": "J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, L. Fei-Fei, Imagenet: a large-scale hierarchical image database, in: IEEE conference on computer vision and pattern recognition, 2009, pp. 248\u2013255.",
"venue": null,
"url": "https://doi.org/10.1109/CVPR.2009.5206848"
}
},
{
"16": {
"title": "doi:10.48550/arXiv.1312.4400.",
"author": "M. Lin, Q. Chen, S. Yan, Network in network, arXiv preprint arXiv:1312.4400 (2013).",
"venue": null,
"url": "https://doi.org/10.48550/arXiv.1312.4400"
}
},
{
"17": {
"title": "doi:10.48550/arXiv.1901.04966.",
"author": "H. Jiang, O. Nachum, Identifying and correcting label bias in machine learning, in: International conference on artificial intelligence and statistics, PMLR, 2020, pp. 702\u2013712.",
"venue": null,
"url": "https://doi.org/10.48550/arXiv.1901.04966"
}
}
],
"url": "http://arxiv.org/html/2408.10414v1"
}
20240819/2408.11868v1.json ADDED
The diff for this file is too large to render. See raw diff.