SlowGuess committed on
Commit 030ce14 · verified · 1 Parent(s): d341b62

Add Batch 132988ed-2d08-4f3e-b85c-dd88bf45a20c

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. .gitattributes +27 -0
  2. 2201.00xxx/2201.00641/7fee1c49-388e-4387-8909-7c3f47f7ba2a_content_list.json +0 -0
  3. 2201.00xxx/2201.00641/7fee1c49-388e-4387-8909-7c3f47f7ba2a_model.json +0 -0
  4. 2201.00xxx/2201.00641/7fee1c49-388e-4387-8909-7c3f47f7ba2a_origin.pdf +3 -0
  5. 2201.00xxx/2201.00641/full.md +469 -0
  6. 2201.00xxx/2201.00641/images.zip +3 -0
  7. 2201.00xxx/2201.00641/layout.json +0 -0
  8. 2201.11xxx/2201.11248/a74eab91-73ff-44e2-9768-88fdde72bd1e_content_list.json +1094 -0
  9. 2201.11xxx/2201.11248/a74eab91-73ff-44e2-9768-88fdde72bd1e_model.json +1435 -0
  10. 2201.11xxx/2201.11248/a74eab91-73ff-44e2-9768-88fdde72bd1e_origin.pdf +3 -0
  11. 2201.11xxx/2201.11248/full.md +246 -0
  12. 2201.11xxx/2201.11248/images.zip +3 -0
  13. 2201.11xxx/2201.11248/layout.json +0 -0
  14. 2201.11xxx/2201.11271/91a22180-c8a5-4c9c-8d6e-dc5ac7e21a17_content_list.json +0 -0
  15. 2201.11xxx/2201.11271/91a22180-c8a5-4c9c-8d6e-dc5ac7e21a17_model.json +0 -0
  16. 2201.11xxx/2201.11271/91a22180-c8a5-4c9c-8d6e-dc5ac7e21a17_origin.pdf +3 -0
  17. 2201.11xxx/2201.11271/full.md +488 -0
  18. 2201.11xxx/2201.11271/images.zip +3 -0
  19. 2201.11xxx/2201.11271/layout.json +0 -0
  20. 2201.11xxx/2201.11279/beaafe54-2b93-4ee7-a89d-737420e9e531_content_list.json +0 -0
  21. 2201.11xxx/2201.11279/beaafe54-2b93-4ee7-a89d-737420e9e531_model.json +0 -0
  22. 2201.11xxx/2201.11279/beaafe54-2b93-4ee7-a89d-737420e9e531_origin.pdf +3 -0
  23. 2201.11xxx/2201.11279/full.md +487 -0
  24. 2201.11xxx/2201.11279/images.zip +3 -0
  25. 2201.11xxx/2201.11279/layout.json +0 -0
  26. 2201.11xxx/2201.11295/8a760769-4fd1-4824-9420-1bc0f6b26c9e_content_list.json +873 -0
  27. 2201.11xxx/2201.11295/8a760769-4fd1-4824-9420-1bc0f6b26c9e_model.json +1193 -0
  28. 2201.11xxx/2201.11295/8a760769-4fd1-4824-9420-1bc0f6b26c9e_origin.pdf +3 -0
  29. 2201.11xxx/2201.11295/full.md +157 -0
  30. 2201.11xxx/2201.11295/images.zip +3 -0
  31. 2201.11xxx/2201.11295/layout.json +0 -0
  32. 2201.11xxx/2201.11302/441c1cbe-30a6-4111-898d-1d3614bc6617_content_list.json +1007 -0
  33. 2201.11xxx/2201.11302/441c1cbe-30a6-4111-898d-1d3614bc6617_model.json +1231 -0
  34. 2201.11xxx/2201.11302/441c1cbe-30a6-4111-898d-1d3614bc6617_origin.pdf +3 -0
  35. 2201.11xxx/2201.11302/full.md +158 -0
  36. 2201.11xxx/2201.11302/images.zip +3 -0
  37. 2201.11xxx/2201.11302/layout.json +0 -0
  38. 2201.11xxx/2201.11332/d19f5003-2cc0-48d3-9b91-4ca890e1790e_content_list.json +0 -0
  39. 2201.11xxx/2201.11332/d19f5003-2cc0-48d3-9b91-4ca890e1790e_model.json +0 -0
  40. 2201.11xxx/2201.11332/d19f5003-2cc0-48d3-9b91-4ca890e1790e_origin.pdf +3 -0
  41. 2201.11xxx/2201.11332/full.md +392 -0
  42. 2201.11xxx/2201.11332/images.zip +3 -0
  43. 2201.11xxx/2201.11332/layout.json +0 -0
  44. 2201.11xxx/2201.11349/d1d35eb5-e8bc-4854-99d4-0edf55cb9c66_content_list.json +0 -0
  45. 2201.11xxx/2201.11349/d1d35eb5-e8bc-4854-99d4-0edf55cb9c66_model.json +0 -0
  46. 2201.11xxx/2201.11349/d1d35eb5-e8bc-4854-99d4-0edf55cb9c66_origin.pdf +3 -0
  47. 2201.11xxx/2201.11349/full.md +600 -0
  48. 2201.11xxx/2201.11349/images.zip +3 -0
  49. 2201.11xxx/2201.11349/layout.json +0 -0
  50. 2201.11xxx/2201.11368/7f8ba145-d060-44e2-99f9-7e27e253fecb_content_list.json +829 -0
.gitattributes CHANGED
@@ -8876,3 +8876,30 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 2201.12xxx/2201.12904/3e258d72-8b3c-4392-8435-890d088d9304_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2201.12xxx/2201.12944/c22ca2bf-d69e-4dd8-825c-533adbb01113_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2201.12xxx/2201.12965/3df350f9-9114-42cb-888c-5b6a3da9c3f9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.00xxx/2201.00641/7fee1c49-388e-4387-8909-7c3f47f7ba2a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11248/a74eab91-73ff-44e2-9768-88fdde72bd1e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11271/91a22180-c8a5-4c9c-8d6e-dc5ac7e21a17_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11279/beaafe54-2b93-4ee7-a89d-737420e9e531_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11295/8a760769-4fd1-4824-9420-1bc0f6b26c9e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11302/441c1cbe-30a6-4111-898d-1d3614bc6617_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11332/d19f5003-2cc0-48d3-9b91-4ca890e1790e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11349/d1d35eb5-e8bc-4854-99d4-0edf55cb9c66_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11368/7f8ba145-d060-44e2-99f9-7e27e253fecb_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11370/dd97e1ba-8b9d-454f-a8bf-4ee11f2b5c38_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11380/f9adaa3f-9ea9-4298-ab05-6cb258bd78ca_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11410/b17192e6-7ad6-4f43-bb57-082ced694d80_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11440/63ad68db-b55a-4075-a839-37ca7b639a1b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11460/1586a2c2-9c40-44f4-af89-0313d90b2856_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11473/77fd747e-86a4-46fc-82d1-bf5212baa69b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11503/765480e9-849d-438c-a854-97093d234205_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11528/d7a293b7-5171-4873-813c-f75d4e4dbc3b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11665/9523db57-1509-41fd-bbb0-07a5aa5acc99_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11679/c3e2d7f4-ca78-4c14-8dc3-25310c916fc9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11692/64a0c7a8-2838-4643-acd5-24c9af7b98fd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11706/d58e9fa2-c644-4e3f-bb8e-ec2521255aa5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11732/efe720a9-843c-4a1f-b62d-e2aa64ae33f3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11736/25a5f422-e340-493a-be09-65160267a94d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11760/bd336d82-dcfd-40c0-afc5-b7db2433c7e0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11793/9c2b9e4d-de05-4c2f-a8a2-19a815911590_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2201.11xxx/2201.11812/9cd2bbde-e54b-4327-a96d-99fe343b23b8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03477/38a925f9-dfc9-4352-9d23-131408ae0853_origin.pdf filter=lfs diff=lfs merge=lfs -text
2201.00xxx/2201.00641/7fee1c49-388e-4387-8909-7c3f47f7ba2a_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.00xxx/2201.00641/7fee1c49-388e-4387-8909-7c3f47f7ba2a_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.00xxx/2201.00641/7fee1c49-388e-4387-8909-7c3f47f7ba2a_origin.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9de635c45dbfb73a21cfccf39691f80a767dd9004c169be8c89dbfb12ad94dff
+size 3745190
2201.00xxx/2201.00641/full.md ADDED
@@ -0,0 +1,469 @@
# VDPC: Variational Density Peak Clustering Algorithm

Yizhang Wang $^{a,b}$ , Chai Quek $^{c}$ , You Zhou $^{d}$ , Di Wang $^{e,f,*}$

$^{a}$ College of Information Engineering, Yangzhou University, Yangzhou, China

$^{b}$ Institute of Scientific and Technical Information of China, Beijing, China

$^{c}$ School of Computer Science and Engineering, Nanyang Technological University, Singapore

$^{d}$ College of Computer Science and Technology, Jilin University, Changchun, China

$^{e}$ Joint NTU-UBC Research Centre of Excellence in Active Living for the Elderly, Nanyang Technological University, Singapore

$^{f}$ Joint NTU-WeBank Research Centre on Fintech, Nanyang Technological University, Singapore

# Abstract

The widely applied density peak clustering (DPC) algorithm makes an intuitive cluster formation assumption: cluster centers are often surrounded by data points with lower local density and are far away from other data points with higher local density. However, this assumption has one limitation: clusters with lower density are often problematic to identify because they may easily be merged into other clusters with higher density. As a result, DPC may not be able to identify clusters with variational density. To address this issue, we propose a variational density peak clustering (VDPC) algorithm, which is designed to systematically and autonomously perform the clustering task on datasets with various types of density distributions. Specifically, we first propose a novel method to identify the representatives among all data points and construct initial clusters based on the identified representatives for further analysis of the clusters' properties. Furthermore, we divide all data points into different levels according to their local density and propose a unified clustering framework by combining the advantages of both DPC and DBSCAN. Thus, all the identified initial clusters spreading across different density levels are systematically processed to form the final clusters. To evaluate the effectiveness of the proposed VDPC algorithm, we conduct extensive experiments using 20 datasets: eight synthetic, six real-world and six image datasets. The experimental results show that VDPC outperforms two classical algorithms (i.e., DPC and DBSCAN) and four state-of-the-art extended DPC algorithms.

Keywords: Density peak clustering, representatives, local density analysis
# 1. Introduction

Clustering is an important unsupervised knowledge acquisition method, which divides unlabeled data into different groups [1, 2]. Different clustering algorithms make different assumptions on cluster formation; thus, most clustering algorithms handle at least one particular type of data distribution well but may not handle other types well. For example, K-means identifies convex clusters well [3], and DBSCAN is able to find clusters with similar densities [4]. Therefore, most clustering methods may not work well on data distribution patterns that differ from the assumptions being made, or on a mixture of different distribution patterns. Taking DBSCAN as an example, it is sensitive to the loosely connected points between dense natural clusters, as illustrated in Figure 1. The density of the connected points shown in Figure 1 differs from that of the natural clusters on both ends; however, DBSCAN with fixed global parameter values may wrongly assign these connected points and consider all the data points in Figure 1 as one big cluster.

![](images/b3f13b8e3dc966ebec35a0c6f06cf187c8d59e37a796d384755e5695ff20f31f.jpg)

![](images/e3970bafb74dc3276dad06b1e3ae9de86a2bc8b7e0061dca71d05bdddb5056cc.jpg)
Figure 1: An illustration of having connected points (yellow points) between two dense natural clusters.

![](images/281ec7bcc21a7bb1931385b49ab5107390db47f53230753505aad24b610f5f1c.jpg)
Figure 2: Visualization of the Compound dataset and the clustering result generated by DPC. (a) The ground truth of a typical variational density dataset, Compound [9]. (b) The clustering result of DPC on dataset Compound; the two natural clusters with different density are merged into one cluster (the dark blue cluster on the right) by DPC.

Density peak clustering (DPC) is a recently proposed clustering algorithm [5] that has received increasing attention [6, 7, 8]. DPC makes a relatively novel assumption on cluster formation: cluster centers are often surrounded by data points with lower density and are also far away from other data points with higher density [5]. This cluster formation assumption leads to a series of advantages of DPC: connected points between different natural clusters are easily identified (each connected point is assigned to its nearest density peak cluster), clusters are determined in a non-iterative manner, outliers are easily identified (naturally, by DPC's assumptions), etc.

However, DPC suffers from one major limitation: it may not identify clusters with relatively lower density. As illustrated in Figure 2, for the Compound dataset [9], which comprises natural clusters with variational density, DPC obtains inferior results. The light blue (low density) and red (high density) natural clusters on the right-hand side of Figure 2(a) are indistinguishable by DPC (see Figure 2(b)).

This drawback makes it difficult for DPC to identify clusters with variational density. To better solve this issue and strive for better performance, in this paper we propose a variational density peak clustering (VDPC) algorithm. Our proposed VDPC algorithm takes advantage of both DPC and DBSCAN. Specifically, if each natural cluster in the underlying dataset has similar density, it is efficient and straightforward to identify all these clusters by applying DBSCAN [4], thanks to its cluster formation assumption (see Section 2.3). By extending the capability of DBSCAN, for a variational density dataset, we first determine the different density levels of the identified initial cluster centers based on DPC's local density analysis (see Figure 3). We then identify the ultimate cluster centers in a divide-and-conquer approach in their respective density levels. To systematically and autonomously determine the different density levels of the identified initial cluster centers, we propose a novel method that lets VDPC self-determine the number of density levels that exist in the underlying dataset. Furthermore, we conduct extensive experiments on both synthetic and real-world datasets with single or multiple density levels. The experimental results show that

![](images/4f64f76fdeb47c784d379c9de276153aef9252f73ac6d70683dbd0cf2283e3f2.jpg)
Figure 3: Illustration on how VDPC differentiates different density levels (see more details in Section 3).

our proposed VDPC algorithm outperforms DPC, DBSCAN and the state-of-the-art DPC variants. Our contributions in this paper are as follows:

(i) We propose a novel data distribution analysis method, which can systematically divide all the data points in a given dataset into respective density levels. Based on such autonomously obtained density levels, we gain more insights into the data distribution patterns of the dataset to further identify the proper subsequent cluster formation strategy.
(ii) Our proposed VDPC algorithm requires only two predetermined parameters, the same number as required by DBSCAN and one fewer than DPC. Moreover, the complexity of VDPC is of the same magnitude as that of DPC and DBSCAN.
(iii) We propose a novel dynamic cluster formation strategy, which combines the advantages of both DPC and DBSCAN. As such, our cluster formation strategy adapts according to the analyzed data distribution patterns to minimize the possibility of wrongly assigning the lower density points.
(iv) By evaluating the effectiveness of VDPC using both widely adopted synthetic and real-world datasets with performance comparisons, we show that VDPC outperforms other clustering methods, especially on datasets comprising multiple density levels.

The remainder of this paper is organized as follows. In Section 2, we review the density peak clustering algorithm, its extensions, and other clustering methods. In Section 3, we provide the technical details of VDPC with examples and illustrations. In Section 4, we present the experimental results with discussions. In Section 5, we conclude this paper and propose future work.
# 2. Related Work

In this literature review section, we introduce the fundamentals of DPC, recent extensions of DPC, and other clustering methods. In addition, we discuss the pros and cons of these reviewed methods and present how we propose VDPC by combining the advantages of prior models.
# 2.1. Density Peak Clustering (DPC) Algorithm

DPC articulates that each data point has two properties, namely $\rho$ as the local density of individual data points and $\delta$ as the minimum distance between one data point and another with higher density. For $N$ data points in a dataset $D = (x_{1}, x_{2}, \dots, x_{N})^{T}$, the similarity between data points $x_{i}$ and $x_{j}$ is defined as

$$
S(x_i, x_j) = \left\| x_i - x_j \right\|, \tag{1}
$$

where $\|\cdot\|$ denotes the Euclidean distance. The upper triangular similarity matrix of $S$, denoted as $U$, is defined as

$$
U = \begin{pmatrix} S_{11} & S_{12} & \dots & S_{1N} \\ & S_{22} & \dots & S_{2N} \\ & & \ddots & \vdots \\ & & & S_{NN} \end{pmatrix}, \tag{2}
$$

where the elements in $U$ are arranged in ascending order to form a one-dimensional vector $u = (u_{1}, u_{2}, \dots, u_{N(N-1)/2})$. Then, the local density ($\rho$) of data point $x_{i}$ is defined as

$$
\rho_i = \sum_{j} e^{-\left(\frac{\left\| x_i - x_j \right\|}{d_c}\right)^2}, \tag{3}
$$

$$
d_c = u_{\text{round}(pct\% \cdot N(N-1)/2)}, \tag{4}
$$

where $\text{round}(\cdot)$ denotes rounding to the nearest integer. Therefore, the value of $\rho$ depends on $pct$, a parameter that needs to be predefined in DPC. In essence, $pct$ is the cut-off value that defines the neighborhood radius used in the computation of local density.

Subsequently, parameter $\delta$ of data point $x_{i}$, which denotes the minimum distance between one data point and another with higher density, is defined as

$$
\delta_i = \min_{j: \rho_j > \rho_i} S(x_i, x_j). \tag{5}
$$

Once the similarity matrix $S$ is determined (see (2)), the values of $\delta_i$ are subsequently determined. The values of $\rho_i$ and $\delta_i$ of all the data points are then used to plot the 2D decision graph (see Figure 4). According to DPC's assumption, cluster centers (i.e., density peaks) should have both high $\rho$ and high $\delta$ values. Based on the decision graph, users are required to manually identify cluster centers by specifying a rectangle on the decision graph (see the red rectangle in Figure 4), whereby the data points inside this red rectangle are the user-identified cluster centers. To specify the rectangle of cluster centers, users need to provide the coordinates of its bottom-left vertex, denoted as $(\rho_u, \delta_u)$. Therefore, DPC requires users to set the values of three parameters, namely $pct$, $\rho_u$ and $\delta_u$.

Once the cluster centers are manually identified, each remaining data point is sequentially assigned to its nearest higher-density neighbor so as to form all clusters. For example, the clustering results shown in Figure 2(b) are obtained from the identified cluster centers shown in Figure 4.

In comparison with DPC, our proposed VDPC automates the process of identifying cluster centers and further extends the capability of handling clusters with variational density.
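Since (1)-(5) fully specify DPC's two per-point statistics, they can be reproduced in a few lines. Below is a minimal NumPy sketch, assuming Euclidean distances and the Gaussian kernel of (3); the function name `compute_rho_delta` and the convention of giving the global density peak the maximum distance are our own illustrative choices, not taken from the authors' released code.

```python
import numpy as np

def compute_rho_delta(X, pct=2.0):
    """Local density rho (3) and distance to the nearest higher-density point delta (5)."""
    N = X.shape[0]
    S = np.linalg.norm(X[:, None, :] - X[None, :, :], axis=2)  # pairwise distances (1)
    # Cut-off distance d_c (4): pick from the sorted upper-triangular distances.
    u = np.sort(S[np.triu_indices(N, k=1)])
    d_c = u[max(int(round(pct / 100.0 * len(u))) - 1, 0)]
    rho = np.exp(-(S / d_c) ** 2).sum(axis=1) - 1.0  # (3); subtract the self term e^0
    delta = np.zeros(N)
    for i in range(N):
        higher = np.where(rho > rho[i])[0]
        # The global density peak has no higher-density point; use the max distance.
        delta[i] = S[i, higher].min() if higher.size else S[i].max()
    return rho, delta
```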
# 2.2. Shared Nearest Neighbour Clustering Algorithm (SNNC)

$k$ Nearest Neighbour (KNN) is a well-established classification and regression method. The Shared Nearest Neighbour clustering algorithm (SNNC) is built upon KNN and only requires one user-defined parameter $k$ (the number of nearest neighbours) [10]. For two data points $x_{i}$ and $x_{j}$, $knn(x_{i})$ and $knn(x_{j})$ denote the $k$ nearest neighbours of $x_{i}$ and $x_{j}$, respectively, where the neighbours are identified based on the Euclidean distance. If $knn(x_{i})$ and $knn(x_{j})$ have more than one shared nearest neighbour, data points $x_{i}$ and $x_{j}$ are deemed as belonging to one cluster. The overall cluster formation process of SNNC is shown in Algorithm 1. SNNC has been widely adopted to improve the performance of other clustering algorithms (e.g., [11, 12]).

In this paper, we propose an extension of SNNC (see Section 3.3.2) to enable VDPC to efficiently determine the cluster formation strategy in certain circumstances.

![](images/c2d5430aae5a300b29f121fd02d102ddb8e1827b6ecbbb0b0f6ce7fbe15f4105.jpg)
Figure 4: The decision graph of dataset Compound generated by DPC ($pct = 1.9$); the data points in the red rectangle are selected as cluster centers.
Algorithm 1: The shared nearest neighbour clustering algorithm (SNNC)
Input: dataset $D$, user-defined parameter $k$, and adjacency matrix $A$
Output: clustering results
1 $A(i,j) = \left\{ \begin{array}{ll}1, & |knn(x_i)\cap knn(x_j)| > 1\\ 0, & \text{otherwise} \end{array} \right.$
2 $N\gets$ the number of data points in $D$;
3 for $i = 1\rightarrow N$ do
4   for $j = 1\rightarrow N$ do
5     if $A(i,j) = 1$ then data points $x_{i}$ and $x_{j}$ belong to the same cluster;
6     else data points $x_{i}$ and $x_{j}$ belong to different clusters;
7   end
8 end
9 each remaining data point that does not belong to any cluster becomes a singleton cluster;
10 output the clustering results.
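Algorithm 1 amounts to linking every pair of points whose $k$-neighbourhoods share more than one point and taking connected components. A compact Python sketch under that reading follows; `snnc` and its flood-fill are our illustrative reconstruction, not the reference implementation of [10].

```python
import numpy as np

def snnc(X, k):
    N = len(X)
    S = np.linalg.norm(X[:, None, :] - X[None, :, :], axis=2)
    knn = np.argsort(S, axis=1)[:, 1:k + 1]          # k nearest neighbours, self excluded
    sets = [set(row) for row in knn]
    labels = np.full(N, -1)
    cur = 0
    for i in range(N):                               # flood-fill connected components
        if labels[i] != -1:
            continue
        stack, labels[i] = [i], cur
        while stack:
            p = stack.pop()
            for q in range(N):
                if labels[q] == -1 and len(sets[p] & sets[q]) > 1:
                    labels[q] = cur
                    stack.append(q)
        cur += 1
    return labels                                    # unlinked points end up as singletons
```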
# 2.3. DBSCAN

DBSCAN is a classical density-based clustering algorithm, which requires two important parameters: $Eps$ as the radius of the underlying neighborhood and $MinPts$ as the minimum number of data points within the neighborhood [4]. DBSCAN has the following definitions: data point $x_{p}$ is a core point if at least $MinPts$ data points are within distance $Eps$ of $x_{p}$ (including data point $x_{p}$ itself); a data point $x_{p}$ is directly density-reachable from a core point $x_{q}$ if $x_{p}$ is within distance $Eps$ of $x_{q}$; a data point $x_{p}$ is density-reachable from another data point $x_{q}$ if there is a chain of data points $\{x_{p},\dots ,x_{o},x_{o + 1},\dots ,x_{q}\}$ such that any data point $x_{o + 1}$ is directly density-reachable from $x_{o}$ in this chain. Finally, DBSCAN generates clusters as follows: if $x_{p}$ is a core point, then it forms a cluster together with all data points that are density-reachable from it.

The performance of DBSCAN heavily depends on the predetermined parameter values. Specifically, if a larger value of $Eps$ is used, the number of noise points (outliers that do not belong to any dense cluster) is generally smaller; if a larger value of $MinPts$ is used, the number of noise points is generally larger. Through extensive experiments, we find that DBSCAN is able to accurately identify clusters with similar densities, but it is sensitive to connected points (see Figure 1). In this research, our proposed VDPC incorporates DBSCAN's key strategies to complement the capability of DPC.
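The parameter sensitivity described above is easy to observe empirically. The following sketch runs scikit-learn's DBSCAN on synthetic blobs with mixed spreads; the dataset and the ($Eps$, $MinPts$) values are our own choices, for demonstration only.

```python
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.datasets import make_blobs

# Three blobs, one much looser than the others (illustrative data).
X, _ = make_blobs(n_samples=400, centers=3, cluster_std=[0.3, 0.3, 1.2], random_state=0)
for eps, min_pts in [(0.3, 5), (0.6, 5), (0.3, 15)]:
    labels = DBSCAN(eps=eps, min_samples=min_pts).fit_predict(X)
    n_noise = int((labels == -1).sum())
    print(f"eps={eps}, MinPts={min_pts}: {labels.max() + 1} clusters, {n_noise} noise points")
```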
# 2.4. Extensions of DPC

Due to the reliable performance and low complexity of DPC, many extended DPC models have been proposed in the literature. Most of these extensions improve the original DPC algorithm from two perspectives, namely similarity measurement and cluster formation strategy. Instead of geometric distance measures, feature learning methods may obtain better representations of the given dataset so as to achieve better clustering results. Along this line of work, Li et al. designed a new density measure based on tree structure [13]. Liu et al. redefined DPC's parameters using SNN similarity [14]. Chen et al. proposed an adaptive density clustering algorithm based on a new density measure, which finds density peaks in different density regions [15]. Xu et al. proposed to use density-sensitive similarity to identify manifold datasets [16]. For identifying clusters with different density, Wang et al. proposed IDPC, which computes a new relative density for individual data points and then takes a two-step strategy to obtain the final clusters [17]. Many other extensions of DPC aimed to improve the cluster formation strategy, striving for better clustering results. Liu et al. proposed the ADPC-KNN algorithm, which autonomously merges initial clusters if they are density-reachable [18]. Hou et al. introduced a new concept of relative density relationship to identify cluster centers [7]. Fang et al. developed a within-cluster similarity-based core fusion strategy [19]. Du et al. proposed an improved DPC algorithm based on KNN and PCA (Principal Component Analysis) to address the issue that DPC may neglect certain clusters and obtain inferior results on high dimensional data [20]. Xie et al. proposed an improved DPC algorithm based on fuzzy weighted KNN [21]. Chen et al. proposed a novel clustering algorithm named CLUB, which finds the density backbone of clusters on the basis of KNN and SNN [22]. Hou et al. proposed density normalization to relieve the influence of the local density criterion [23]. Mohamed et al. proposed a KNN-based DPC using SNN to identify dense regions [8]. Chen et al. replaced the density peak with a density core, which consists of multiple peaks and still retains the shape of clusters [24]. To identify clusters with different density, Xie et al. used a density ratio to replace $d_{c}$ (see (3)) [25].

Nonetheless, none of the afore-reviewed studies considered improving DPC from the perspective of conducting an overview analysis of the clusters' properties based on the data distribution. In this paper, we propose a systematic and autonomous analysis method to distinguish the different density levels that exist in the given dataset, which may have variational density distribution patterns. Thus, we transform the complex variational density clustering problem into relatively straightforward uniform density clustering problems in the respective density levels. In the following section, we present the technical details of VDPC.
# 3. Variational Density Peak Clustering (VDPC) Algorithm

In this research, we propose a novel Variational Density Peak Clustering (VDPC) algorithm, which consists of the following three key steps: (i) representative selection and initial cluster formation, (ii) dividing the initial cluster centers into density levels by systematically analyzing the distribution of data points, and (iii) final cluster formation. These three key steps are introduced in detail in the subsequent subsections.

# 3.1. Representatives Selection and Initial Cluster Formation

Representatives, denoted as $r$, are often selected to find the backbone of the given data points [26], which provides insights into the data distribution. The typical way to select representatives is through clustering, i.e., the identified cluster centers are often the representatives that we want to obtain. In VDPC, we first extend DPC to find the representatives. Specifically, we propose a parameter on the cut-off value of $\delta$, denoted as $\delta_t$, to select the representatives on the 2D decision graph (see Figure 4). Please note that although we use the same decision graph as DPC to determine the initial cluster centers in VDPC, we do not set a cut-off value on local density (e.g., $\rho_u$ used in DPC). Therefore, VDPC does not require any human intervention to determine the cluster centers.
![](images/1b330b27d801dcba571237b0236910a6f65238abc6fe02a6153b29c6dae8a0c9.jpg)
(a)

![](images/75e3cb271326991f5fd75c237f441bcb801085b0749ec98022dd531c2f3cfc44.jpg)
(b)
Figure 5: Representatives selection and initial cluster formation. (a) The representatives of dataset Compound [9] (identified in the red rectangle) ($\delta_t = 1.39$). (b) The initial clusters of dataset Compound obtained by applying the cluster formation strategy of DPC on all the identified representatives.
Definition 1 (Representatives ($r$)). The data points with higher $\delta$ values are selected as representatives as follows:

$$
r = \left\{ x_i \mid \delta_i \geq \delta_t \right\}. \tag{6}
$$

As illustrated in Figure 5(a), for dataset Compound, when $\delta_t$ is set to 1.39 (determined by heuristic fine-tuning), the representatives in the red rectangle are selected. Taking these representatives as the initial cluster centers, the corresponding clusters obtained by applying DPC are shown in Figure 5(b) for illustration. Note that for a representative $x_i$, we denote the corresponding initial cluster centered on $x_i$ as $C_{x_i}$.

Note that in this initial step, we consider all data points fulfilling the selection criterion $\delta_i \geq \delta_t$ as representatives, regardless of their $\rho$ values. The key reason we deem $r$ to be appropriate representatives (see (6)) is as follows: if a representative (cluster center) has a higher $\delta$ value, it is far away from other centers with higher local density, according to the definition of parameter $\delta$ (see (5)). As shown in Figure 5(b), these selected representatives can be used to form initial clusters (assigning all data points to their corresponding representatives, as in DPC), and the distribution of these initial clusters helps us to understand the intrinsic structure of the underlying dataset.
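The representative selection of (6) plus DPC's assignment rule can be sketched as follows, reusing `compute_rho_delta` and the pairwise distance matrix `S` from the earlier snippet. The helper name is hypothetical, and we assume the densest point always satisfies $\delta_i \geq \delta_t$ (it carries the maximum $\delta$ by construction).

```python
import numpy as np

def initial_clusters(X, rho, delta, S, delta_t):
    reps = np.where(delta >= delta_t)[0]               # representatives (6)
    labels = np.full(len(X), -1)
    labels[reps] = np.arange(len(reps))
    for i in np.argsort(-rho):                         # visit points by descending density
        if labels[i] == -1:
            higher = np.where(rho > rho[i])[0]
            nearest = higher[np.argmin(S[i, higher])]  # nearest higher-density neighbor
            labels[i] = labels[nearest]                # already labeled: visited earlier
    return reps, labels
```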
# 3.2. Dividing Representatives into Different Density Levels

After identifying the representatives and their corresponding initial clusters, we analyze their local density ($\rho$ in DPC) to better understand the intrinsic structure of the underlying dataset. This step mainly aims to divide the whole dataset (including the identified representatives) into a number of levels, wherein each level only comprises data points of similar density.

Specifically, we first project the identified representatives from the 2D decision graph onto the $\rho$-axis (see Figure 6), i.e., we tentatively ignore the $\delta_{i}$ values and only consider the $\rho_{i}$ values for now. We then divide the $[\min(\rho), \max(\rho)]$ range into $num$ equal-width segments. As such, the width $w$ of each segment can be computed as follows:

$$
w = \frac{\max(\rho) - \min(\rho)}{num}, \tag{7}
$$

where $\max(\rho)$ and $\min(\rho)$ denote the largest and the smallest $\rho$ values of all the representatives, respectively. Because we always set $num$ to a constant value when conducting all experiments in this research, we do not consider it a parameter that needs to be predefined by users. The heuristic setting of $num$ is discussed in Section 4.1. Furthermore, we present the definitions of gaps and local density levels, two important terms used in VDPC, as follows.
![](images/0d1a80d5a1bb8edcba7ee3e221402d53d325c3f726f323b0bad846ee113705ed.jpg)
Figure 6: The density levels are determined by projecting the identified representatives onto the $\rho$-axis. The red circles represent the representatives and each segment has an equal width of $w$. For the Compound dataset, $w = 1.4607$ (see (7)) and the number of density levels $numl = 2$.
Definition 2 (gap (denoted as $gap$)). For the identified representatives projected onto the $\rho$-axis, if the distance between any pair of adjacent representatives is at least twice the width ($w$) of a segment, we deem there is a gap in between, and the width of the gap, denoted as $gap$, is defined as the distance between the two adjacent representatives. Mathematically, we have the following definition:

$$
gap_i = \left\{ \begin{array}{ll} \rho_{i+1} - \rho_{i}, & \text{if } \rho_{i+1} - \rho_{i} \geq 2w, \ \forall i \in [0, N-1], \ \rho_{i+1} > \rho_{i}, \\ 0, & \text{otherwise}. \end{array} \right. \tag{8}
$$

If $gap_i = 0$ for all $i$, we say there is no gap in the underlying dataset. Otherwise, we say there are $|\{i \mid gap_i > 0\}|$ gaps in the underlying dataset. As illustrated in Figure 6, the range $[1.375, 8.392]$ is the only identified gap in this case.

Definition 3 (density levels (denoted as $l$)). The identified gap(s) divide(s) the overall $\rho$ range of the representatives into $numl = |\{i \mid gap_i > 0\}| + 1$ intervals. We denote the identified intervals as density levels (denoted as $l$).

As illustrated in Figure 6, the overall range of the representatives is $[0.0354, 14.6423]$, and the range of the only gap is $[1.375, 8.392]$ in this case (see (8)). Obviously, this gap divides the representatives into two different intervals (i.e., density levels): the lower density level $[0.0354, 1.375]$ and the higher density level $[8.392, 14.6423]$. Generally speaking, the difference between two density levels is large. For example, with reference to Figure 6, the average local density of all the representatives in the two density levels is computed as $\overline{\rho l_1} = 0.3685$ and $\overline{\rho l_2} = 11.3278$, respectively; the relative difference in terms of the ratio $\overline{\rho l_2} / \overline{\rho l_1} \approx 31$ is large. As a result, Definition 2, on identifying whether any gap exists in the underlying dataset, effectively helps VDPC to systematically determine whether there is a significant difference among all the data points in terms of their local density.

Note that all the representatives are now divided into one or more density levels and all the data points have already been assigned to different representatives to form initial clusters. Thus, all the initial clusters also fall into one or more density levels. In the following subsection, we introduce how the data points may be reassigned according to the identified number of density levels so as to accurately identify clusters with variational density.
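Equations (7)-(8) reduce to a linear scan over the sorted representative densities. A sketch follows; the function name and the example densities are invented for illustration (the example merely mimics the two-level pattern of Figure 6).

```python
import numpy as np

def density_levels(rep_rho, num=10):
    r = np.sort(np.asarray(rep_rho, dtype=float))
    w = (r[-1] - r[0]) / num                      # segment width (7)
    levels, current = [], [r[0]]
    for prev, cur in zip(r[:-1], r[1:]):
        if cur - prev >= 2 * w:                   # a gap (8) opens a new density level
            levels.append(current)
            current = []
        current.append(cur)
    levels.append(current)
    return levels                                 # numl = len(levels)

# Hypothetical representative densities with one large gap -> two levels:
print(density_levels([0.04, 0.4, 1.37, 8.39, 10.5, 12.2, 14.64]))
```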
# 3.3. Final Cluster Formation

After dividing all the representatives into $numl$ density levels, we can further regulate the subsequent cluster formation process based on the value of $numl$. Specifically, we apply different cluster formation processes for $numl = 1$ and $numl \geq 2$. When $numl = 1$, there is no gap found among the representatives, meaning all the clusters have similar density falling on the same level. When $numl \geq 2$, the clusters have significantly different (i.e., variational) density; therefore, further investigation is needed in this scenario. We deem a dataset to have similar density if the corresponding $numl$ is found to be 1, and variational density if $numl \geq 2$. For example, as shown in Figure 6, the Compound dataset has two density levels ($numl = 2$), so it is a variational density dataset. We introduce the detailed cluster formation strategies in the following subsections.
# 3.3.1. Cluster Formation for Datasets with Similar Density

When $numl = 1$, all the initial clusters fall into the same density level. In this situation, although all the initial clusters have similar density, some connected points may still exist (see Figure 1 as an example). Because DPC handles connected points well (data points with lower $\rho$ values are assigned to their nearest higher-density neighbors [5]), we simply adopt DPC for the subsequent cluster formation. Specifically, we take all the identified representatives as the centers of the final clusters; each remaining data point is then sequentially assigned to its nearest higher-density neighbor so as to generate the final clusters. In this scenario, the clustering strategy of VDPC is identical to that of DPC.

Note that VDPC does not straightforwardly replicate DPC in this scenario (only the clustering strategy is adopted from DPC), because the initial cluster centers are systematically identified in VDPC, whereas the cluster centers have to be manually selected in DPC.
# 3.3.2. Cluster Formation for Datasets with Variational Density

When $numl \geq 2$, the initial clusters fall into different density levels with large gap(s) in between. In this scenario, the cluster formation strategy in different density levels should be different so as to effectively cope with the different data distribution patterns in the respective density levels. Specifically, we identify clusters with lower density in $l_{1}$, and we propose an autonomous DBSCAN algorithm to group the remaining data points with uniform density in the $p$-th density level other than $l_{1}$ ($l_{p}$, $2 \leq p \leq numl$).

In $l_{1}$, there are two kinds of data points, and they all have lower local density. The first kind are the boundary points located at the boundary of clusters with higher density in $l_{p}$; the other kind are the data points in the clusters with lower density (see Figure 3). Boundary points are supposed to be part of clusters with higher density, so we delete them from $l_{1}$ and reassign them to the nearest representatives in $l_{p}$. For this purpose, we propose an autonomous Shared Nearest Neighbour Clustering (aSNNC) algorithm to group all the data points in $l_{1}$ into clusters. SNNC (see Section 2.2) requires users to predetermine the number of nearest neighbours $k$ [10]. In VDPC, we set a global $k$ value for SNNC in an autonomous way by heuristically setting $k = \lceil \sqrt{nl} \rceil$, where $nl$ denotes the number of data points that fall into $l_{1}$. Please refer to Appendix A for more details on this heuristic setting of $k$.

For an initial cluster, the local density of data points decreases from its center to its boundary. The number of boundary points in $l_{1}$ is always small because most data points have been merged into initial clusters located in $l_{p}$ (in most real-world datasets, there are far fewer outliers than data points in a dense cluster; see Section 4.4). Based on this nature of boundary points, we define boundary points in $l_{1}$ as follows.
Definition 4 (boundary points in $l_{1}$ ($BP$)). For the clusters generated by applying aSNNC in $l_{1}$, denoted as $C_{l_1} = \{c_{l_1}^1,\dots ,c_{l_1}^i,\dots ,c_{l_1}^n\}$, where $n$ denotes the total number of clusters generated in $l_{1}$, if the number of data points in one cluster is smaller than the mean number of data points per cluster in $C_{l_1}$, the corresponding cluster may consist of boundary points. As such, the boundary points ($BP$) are identified as follows:

$$
BP = \left\{ \text{all data points in } c_{l_1}^{i} \;\middle|\; \#c_i < \frac{\sum_{i=1}^{n} \#c_i}{n} \right\}, \tag{9}
$$

where $c_{l_1}^i$ denotes a cluster generated by applying aSNNC in $l_1$, and $\#c_i$ denotes the number of data points in cluster $c_{l_1}^i$.

The remaining clusters in $l_{1}$ are the final clusters with lower density, denoted as $C_{low}$:

$$
C_{low} = \left\{ c_{l_1}^{i} \;\middle|\; \#c_i \geq \frac{\sum_{i=1}^{n} \#c_i}{n} \right\}. \tag{10}
$$
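The $l_1$ processing can be sketched end to end: cluster the low-density level with aSNNC using the heuristic $k = \lceil \sqrt{nl} \rceil$, then flag every cluster smaller than the mean cluster size as boundary points (9), leaving $C_{low}$ (10). This reuses the illustrative `snnc()` helper from Section 2.2; the function name is ours.

```python
import math
import numpy as np

def split_level_one(X_l1):
    k = math.ceil(math.sqrt(len(X_l1)))           # autonomous k for aSNNC
    labels = snnc(X_l1, k)
    sizes = np.bincount(labels)
    mean_size = sizes.mean()
    is_bp = sizes[labels] < mean_size             # (9): undersized clusters -> boundary points
    bp_idx = np.where(is_bp)[0]                   # to be reassigned to representatives in l_p
    c_low = {c for c in range(len(sizes)) if sizes[c] >= mean_size}  # (10)
    return bp_idx, c_low, labels
```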
After identifying the clusters with lower density ($C_{low}$) in $l_1$, we reassign the identified boundary points ($BP$) to the nearest representatives in $l_p$. The data points $\{x_i \mid x_i \in D$ and $x_i \notin C_{low}\}$, whose number is $|D| - |C_{low}|$, have similar density in the respective density level $l_p$, so we further employ DBSCAN to identify the remaining clusters. As introduced in Section 2.3, the performance of DBSCAN highly depends on and is sensitive to the values of the two parameters $Eps$ and $MinPts$. In this research, we propose an autonomous DBSCAN (aDBSCAN) algorithm to systematically determine the values of these two parameters based only on the data distribution patterns observed in $l_p$.

![](images/bd7e49c37a7aff7dd262e497ead5de31b8a248232b8cafb53795d1e6bfe07e39.jpg)
Figure 7: Assume both clusters are found in density level $l_{p}$. The red point is identified as $x_{low}$, the blue point is identified as $x_{far}$, and the length of the blue segment is set to $Eps$.
The parameter $Eps$ of DBSCAN determines whether data points are directly density-reachable from core points or not. In order to avoid the boundary points previously reassigned to $l_{p}$ being identified as noise by DBSCAN, the value of $Eps$ should be set carefully to give them a greater possibility of being density-reachable from core points. Thus, the parameter $Eps$ is heuristically defined as follows:

$$
Eps = sim\left(\left\lceil \sqrt{|C_{x_{low}}|} \right\rceil\right), \tag{11}
$$

$$
sim(i) = \left\| x_{far} - x_{i} \right\|, \quad x_{i} \in l_{p}, \tag{12}
$$

where $sim$ denotes the vector of Euclidean distances, sorted in ascending order, between data point $x_{far}$ and all other data points in $l_p$; $x_{far}$ denotes the boundary point in the initial cluster $C_{x_{low}}$ that is farthest from its representative $x_{low}$, the representative with the lowest local density in $l_p$; $|C_{x_{low}}|$ denotes the number of data points in $C_{x_{low}}$; and $(\cdot)$ denotes an index into the vector $sim$. We use $x_{far}$ to compute $sim$ so that at least $\lceil \sqrt{|C_{x_{low}}|} \rceil$ data points are within distance $Eps$ of $x_{far}$; as a result, the boundary points have a greater possibility of being density-reachable from core points. An example depicting the computation of $Eps$ is shown in Figure 7. Please refer to Appendix B for more details on the heuristic setting of the cut-off value $\lceil \sqrt{|C_{x_{low}}|} \rceil$.
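A direct transcription of the $Eps$ heuristic (11)-(12): sort the distances from $x_{far}$ to every point in level $l_p$ and take the $\lceil \sqrt{|C_{x_{low}}|} \rceil$-th smallest, so at least that many points sit within $Eps$ of $x_{far}$. Variable names are ours for illustration.

```python
import math
import numpy as np

def auto_eps(x_far, X_lp, cluster_size):
    sim = np.sort(np.linalg.norm(X_lp - x_far, axis=1))   # ascending distances (12)
    idx = math.ceil(math.sqrt(cluster_size))
    return sim[min(idx, len(sim) - 1)]                    # Eps (11), clamped for safety
```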
After obtaining the value of $Eps$, we subsequently use it to systematically determine the value of $MinPts$. If the value of $MinPts$ is set too high, the boundary points may very likely be identified as noise; if it is set too low, the boundary points may be identified as core points. To avoid these extreme cases, we take a balanced trade-off. Specifically, we first obtain the number of data points that are closer than $Eps$ to the representative with the highest local density in $l_{p}$, as follows:

$$
MinPts_{high} = \sum_{j} \chi\left(\left\| x_{high} - x_{j} \right\| - Eps\right), \quad x_{high} \in C_{x_{high}}, \ x_{j} \in C_{x_{high}}, \tag{13}
$$

$$
\chi(v) = \left\{ \begin{array}{ll} 1, & v < 0, \\ 0, & v \geq 0, \end{array} \right. \tag{14}
$$

where $x_{high}$ denotes the representative with the highest local density in $l_p$. Similarly, we then obtain the number of data points that are closer than $Eps$ to $x_{far}$ (see (12)), as follows:

$$
MinPts_{low} = \sum_{j} \chi\left(\left\| x_{far} - x_{j} \right\| - Eps\right), \quad x_{far} \in C_{x_{low}}, \ x_{j} \in C_{x_{low}}. \tag{15}
$$

To strike a balanced, reasonable trade-off, we set the average of $MinPts_{low}$ and $MinPts_{high}$ as the value of $MinPts$ in aDBSCAN, as follows.
$$
MinPts = \left\lceil \frac{MinPts_{low} + MinPts_{high}}{2} \right\rceil. \tag{16}
$$

![](images/36684d707d5d5e22f3369d2432e4aec6bd91ab81c2353c9e59f08d96cb837de8.jpg)
Figure 8: By extending Figure 7, where the blue point is identified as $x_{far}$, we further identify the yellow point as $x_{high}$. The value of $MinPts$ is then computed as the average of the numbers of data points within the respective $Eps$ radii, i.e., $MinPts = \lceil (3 + 9) / 2 \rceil = 6$.

An example illustrating the determination of $MinPts$ is shown in Figure 8.
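The $MinPts$ heuristic (13)-(16) is just two neighbor counts and an average: count the points within $Eps$ of the highest-density representative and of $x_{far}$, then take the ceiling of their mean. Names are ours for illustration.

```python
import math
import numpy as np

def auto_minpts(x_high, C_high, x_far, C_low_cluster, eps):
    n_high = int((np.linalg.norm(C_high - x_high, axis=1) < eps).sum())       # (13)-(14)
    n_low = int((np.linalg.norm(C_low_cluster - x_far, axis=1) < eps).sum())  # (15)
    return math.ceil((n_low + n_high) / 2)                                    # (16)

# With the counts from Figure 8 (3 and 9), this yields ceil((3 + 9) / 2) = 6.
```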
So far, we have shown that both parameter values in aDBSCAN are autonomously determined (see (11) and (16), respectively). By applying aDBSCAN with the auto-obtained parameter values, the data points in the respective density level ($l_p$) are divided into those belonging to the formed clusters and those identified as noise. Let $NCD$ denote the set of all data points in the formed clusters and let $CO$ denote the set of all data points identified as noise; we then use $|NCD|$ and $|CO|$ to denote the number of clusters in $NCD$ and the number of data points in $CO$, respectively. We further improve the clustering results of aDBSCAN by reexamining the micro-clusters in $NCD$.

Definition 5 (micro-clusters in $NCD$). For all clusters in $NCD$, if the local density of a cluster center is smaller than the average local density of all centers, we call the corresponding cluster a micro-cluster (denoted as $mc$) and we use $|mc|$ to denote the number of micro-clusters.

It is not guaranteed that all the micro-clusters obtained in $l_{p}$ constitute final clusters. Based on Definition 5, the centers of the identified micro-clusters have relatively lower $\rho$ values, so we need to further check whether they can be considered final cluster centers. To make this decision, we apply a straightforward heuristic rule that checks whether an unnecessary number of clusters has been identified in $l_{p}$. The heuristic rule and the corresponding action are defined as follows: if $|mc| < \frac{|NCD|}{2}$, the micro-clusters not only have smaller $\rho$ values but are also few in number, so all the data points in $mc$ are merged into their nearest other clusters (non-micro-clusters) in $NCD$; otherwise, i.e., if $|mc| \geq \frac{|NCD|}{2}$, the micro-clusters constitute the majority of the identified $|NCD|$ clusters and are deemed final clusters. After the identification of the final cluster centers, the remaining data points in $CO$ are sequentially assigned to the nearest cluster center with higher local density (see DPC's assignment process in Section 2.1).
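A sketch of the micro-cluster rule: clusters whose center density falls below the mean center density are micro-clusters (Definition 5), and they are merged away only when they are a minority ($< |NCD|/2$). Here `center_rho`, mapping each cluster id to its center's local density, is an illustrative structure of ours.

```python
def resolve_micro_clusters(center_rho):
    mean_rho = sum(center_rho.values()) / len(center_rho)
    mc = [c for c, r in center_rho.items() if r < mean_rho]   # micro-clusters (Definition 5)
    if len(mc) < len(center_rho) / 2:
        return set(mc)      # minority: merge these into their nearest non-micro-clusters
    return set()            # majority: keep the micro-clusters as final clusters
```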
For the two kinds of density levels, $l_{1}$ and $l_{p}$, we apply two different clustering methods: aSNNC and aDBSCAN (see Section 3.3.2). To verify the effectiveness of this identification pipeline, we conduct a heuristic validation by alternately applying aDBSCAN and aSNNC in $l_{1}$ and $l_{p}$. The results (see Appendix C) show that the current configuration, applying aSNNC in $l_{1}$ and aDBSCAN in $l_{p}$, leads to the best performance, which suggests that our intuitive designs of aSNNC and aDBSCAN are appropriate.
# 3.4. Overall VDPC Procedures

Having introduced all the procedures of VDPC in the preceding subsections, we summarize the overall algorithm in this subsection. Notably, as an extension of DPC, VDPC only requires two predefined parameter values, namely $pct$ (adopted from DPC, see (3)) and $\delta_t$, which is one fewer than DPC requires. In VDPC, the processes of identifying the cluster centers and the subsequent cluster assignment are all performed in a systematic way, without the need to predetermine other parameter values. Compared with DPC, wherein human intervention is required to identify cluster centers, VDPC is fully autonomous throughout the overall clustering procedure. The overall VDPC algorithm is shown in Algorithm 2 and the corresponding flowchart is shown in Figure 9. To provide a better understanding of the VDPC procedures, we present step-by-step illustrations in Figure 10.

![](images/fb6caad0c44eb295f85b0026db9c82eeabe3d42a01021881db6384c17030ff68.jpg)
Figure 9: The flowchart of VDPC.

Algorithm 2: Variational density peak clustering algorithm (VDPC)
Input: dataset $D$, parameters $pct$ and $\delta_t$
Output: assigned cluster indices for all data points in $D$
1 obtain representatives according to $pct$ and $\delta_t$ (see Section 3.1);
2 obtain the number of different density levels $numl$ (see Definition 3);
3 if $numl \geq 2$ then
4   clusters $C_{low}\gets$ cluster the data points in $l_{1}$ by applying aSNNC, with the boundary points excluded (see (9));
5   clusters and noise $\leftarrow$ cluster the remaining $|D| - |C_{low}|$ data points by applying aDBSCAN;
6   micro-clusters ($mc$) $\gets$ clusters in $l_{p}$ whose center's local density is smaller than the average local density of all centers (see Definition 5);
7   $|mc|\gets$ the number of micro-clusters ($mc$);
8   if $|mc| < |NCD| / 2$ then
9     assign all data points in the micro-clusters to the nearest centers of other clusters;
10  end
11  assign noise points ($CO$) to their nearest centers (see DPC's assignment process in Section 2.1);
12 else
13  take the identified representatives as centers and merge other data points into their nearest higher-density centers;
14 end
15 output the final clustering results;
![](images/cb970cd74397a05be851fff3e5682ce0eccdbf5a481e22c90fad375114dbde8b.jpg)
(a)

![](images/c3f03d4772f3d74e743efa3d2ba233b51880c50dd69fb67df253f35670701d8e.jpg)
(b)

![](images/df16eb509815d7378410f2104c01a29aec4879cc6525adc4f6a6ec4f0c5c420b.jpg)
(c)

![](images/932c4c1f789d1a02f70a26c840fe93eaabb9cb901b71ce3613a59ee88857da16.jpg)
(d)

![](images/032f372ed0b423e8d3599cb2172c8bd61aa1c42007668c5e885c2fabb8c45861.jpg)
(e)

![](images/90fe495c8969ee669465e6bcb0c42b74e706afd1e2351566229b58f6d97d2918.jpg)
(f)
Figure 10: Step-by-step illustrations of applying VDPC on the Compound dataset. (a) The ground-truth clusters given in the Compound dataset [27]. (b) The clustering results on data points that fall in $l_{1}$ by applying aSNNC. (c) The final cluster ($C_{low}$) identified in $l_{1}$. The data points in the top-left region are not identified as final clusters because they are identified as boundary points. (d) The remaining $|D| - |C_{low}|$ data points that require further analysis. (e) Results of applying aDBSCAN on the remaining $|D| - |C_{low}|$ data points; the black points represent noise, and the red, green and orange clusters are the three identified micro-clusters. Note that the micro-clusters in this case are the final clusters because they constitute the majority of the identified clusters (see Section 3.3.2). (f) Final clustering results after assigning the noise points and micro-clusters.
# 3.5. Analysis of Time Complexity

In VDPC, the time complexity of computing the similarity matrix (see (1)) is $O(n^{2})$. For the cluster formation scenario on datasets with non-variational density, the time complexity is the same as that of DPC, which is $O(n^{2})$. For the cluster formation scenario on variational density datasets, the time complexity of both aSNNC and aDBSCAN is $O(n^{2})$. Thus, the overall time complexity of VDPC is of the same magnitude as that of DPC and DBSCAN, i.e., $O(n^{2})$.
# 4. Experimental Results and Discussions

In this section, we use eight synthetic datasets, six UCI datasets and six image datasets to comprehensively evaluate the effectiveness of the proposed VDPC algorithm. Datasets Jain, Flame, Aggregation, R15, Compound and Pathbased are from the University of Eastern Finland<sup>1</sup>; the large-scale datasets T58 and T710 are from Karypis Lab<sup>2</sup> and have no ground truth defined. Datasets German, Yeast, Pima, Heart, Spambase and Immunotherapy are from the UCI repository<sup>3</sup>. COIL-20, Olivetti Faces-100, Olivetti Faces, UMist Faces, mini-Corel5K and mini-Cafar10 consist of face and general object images. The statistical information of all the datasets used in this paper is shown in Table 1. All the datasets used for the experiments and the program code of VDPC can be downloaded from our GitHub repository<sup>4</sup>.

We present the selected $pct$ and $\delta_t$ values for all the datasets in the respective experiments. In order to fairly compare the performance of all algorithms, we run them all on the same machine (i9-10900K CPU, 32 GB RAM) and software (MATLAB R2020a) to obtain the clustering results.
Table 1: Dataset Features

| Type | ID | Datasets | #Samples | #Dimensions | #Natural clusters |
|------|----|----------|----------|-------------|-------------------|
| Synthetic | 1 | Jain [9] | 373 | 2 | 2 |
| Synthetic | 2 | Flame [28] | 240 | 2 | 2 |
| Synthetic | 3 | Aggregation [29] | 788 | 2 | 7 |
| Synthetic | 4 | R15 [30] | 600 | 2 | 2 |
| Synthetic | 5 | Compound [27] | 399 | 2 | 6 |
| Synthetic | 6 | Pathbased [9] | 300 | 2 | 2 |
| Synthetic | 7 | T58 | 8000 | 2 | N/A |
| Synthetic | 8 | T710 | 10000 | 2 | N/A |
| UCI | 1 | German | 1000 | 24 | 2 |
| UCI | 2 | Yeast | 1484 | 8 | 3 |
| UCI | 3 | Pima | 768 | 8 | 2 |
| UCI | 4 | Heart | 300 | 13 | 2 |
| UCI | 5 | Spambase | 4601 | 57 | 2 |
| UCI | 6 | Immunotherapy | 90 | 7 | 12 |
| Images | 1 | COIL-20 | 1440 | 128x128 | 20 |
| Images | 2 | Olivetti Faces-100 | 100 | 64x64 | 10 |
| Images | 3 | Olivetti Faces | 400 | 64x64 | 40 |
| Images | 4 | UMist Faces | 575 | 112x92 | 20 |
| Images | 5 | mini-Corel5K | 100 | 192x128 | 10 |
| Images | 6 | mini-Cafar10 | 1000 | 32x32 | 10 |
# 4.1. Sensitivity Test on Parameter num

The proposed VDPC requires two user-defined parameters ($pct$ and $\delta_t$). This is because, when conducting experiments, we set $num$ to a constant value, i.e., 10, according to the following sensitivity test. Specifically, we select the six datasets Jain, Flame, Aggregation, R15, Compound and Pathbased for this sensitivity test because they vary in size (from 240 to 788), number of classes (from 2 to 7) and number of density levels (1 or 2). As shown in Table 2, across different values of $num$ (namely 5, 8, 10, 12, 15), VDPC always obtains the best performance when $num = 10$. Moreover, when we set $num = 10$, datasets Jain, Flame, Aggregation and R15 are found to have one density level, while the rest have two density levels. This finding suggests that by setting $num$ to 10, different density levels can be effectively identified across datasets while VDPC still obtains the best performance. Thus, for all datasets used in this paper, we always set $num = 10$ in VDPC.
337
+ Table 2: Clustering results (ARI) of VDPC using different num values
338
+
339
+ <table><tr><td rowspan="2">Datasets</td><td colspan="5">num</td></tr><tr><td>5</td><td>8</td><td>10</td><td>12</td><td>15</td></tr><tr><td>Jain</td><td>1.0000</td><td>1.0000</td><td>1.0000</td><td>1.0000</td><td>1.0000</td></tr><tr><td>Flame</td><td>1.0000</td><td>1.0000</td><td>1.0000</td><td>1.0000</td><td>1.0000</td></tr><tr><td>Aggregation</td><td>1.0000</td><td>1.0000</td><td>1.0000</td><td>1.0000</td><td>1.0000</td></tr><tr><td>R15</td><td>1.0000</td><td>1.0000</td><td>1.0000</td><td>1.0000</td><td>1.0000</td></tr><tr><td>Compound</td><td>0.4867</td><td>1.0000</td><td>1.0000</td><td>1.0000</td><td>0.9859</td></tr><tr><td>Pathbased</td><td>1.0000</td><td>1.0000</td><td>1.0000</td><td>0.5003</td><td>0.5422</td></tr></table>
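+ 
+ For reproducibility, the sensitivity test above reduces to a simple grid evaluation over candidate $num$ values. The following minimal Python sketch illustrates the protocol; `vdpc` is a hypothetical stand-in callable (the released code is in MATLAB, so this signature is an assumption, not the actual API):
+ 
+ ```python
+ # Grid evaluation of num: run the clustering function with each candidate
+ # value and score the result against the ground truth with ARI.
+ # `vdpc` is a hypothetical callable (X, pct, delta_t, num) -> labels.
+ from sklearn.metrics import adjusted_rand_score
+ 
+ def num_sensitivity(vdpc, datasets, params, num_values=(5, 8, 10, 12, 15)):
+     """datasets: {name: (X, y_true)}; params: {name: (pct, delta_t)}."""
+     results = {}
+     for name, (X, y_true) in datasets.items():
+         pct, delta_t = params[name]
+         results[name] = {num: adjusted_rand_score(y_true, vdpc(X, pct, delta_t, num))
+                          for num in num_values}
+     return results
+ ```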
340
+
341
+ # 4.2. Benchmarking Models
342
+
343
+ To show the effectiveness of the proposed VDPC algorithm, we select two classical clustering algorithms (DBSCAN [4] and the original DPC [5]) and four state-of-the-art improved DPC algorithms (DPC-KNN$^{5}$ [20], McDPC$^{6}$ [6], SNNDPC$^{7}$ [14] and FKNN-DPC$^{8}$ [21]). The parameters used in each algorithm are introduced in Table 3. For fair comparison, all parameter values are fine-tuned through extensive experiments, and the best-performing values are listed in Tables 4-6. To quantitatively compare the performance of all algorithms, we use two popular metrics, namely Adjusted Rand Index (ARI) [31] and Normalized Mutual Information (NMI) [32].
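+ 
+ Both metrics compare a predicted partition against the ground-truth labels and equal 1 for a perfect clustering (up to label permutation). As an illustration, they can be computed with scikit-learn as follows; this snippet is a sketch for exposition only, since the reported numbers come from MATLAB implementations:
+ 
+ ```python
+ # ARI [31] and NMI [32] between ground-truth and predicted labels.
+ # Illustrative only: the experiments in this paper used MATLAB R2020a.
+ from sklearn.metrics import adjusted_rand_score, normalized_mutual_info_score
+ 
+ def evaluate(y_true, y_pred):
+     return {"ARI": adjusted_rand_score(y_true, y_pred),
+             "NMI": normalized_mutual_info_score(y_true, y_pred)}
+ 
+ # A clustering identical to the ground truth up to renaming scores 1.0 on both.
+ print(evaluate([0, 0, 1, 1, 2, 2], [1, 1, 0, 0, 2, 2]))
+ ```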
346
+
347
+ Table 3: The parameter description of each algorithm
348
+
349
+ <table><tr><td>Algorithm</td><td>Required parameters with descriptions</td></tr><tr><td>DBSCAN</td><td>Eps: radius of reachable neighborhood; MinPts: min number of points within radius Eps to form a cluster</td></tr><tr><td>DPC</td><td>pct: a relative ratio to determine the cut-off distance dc; ρu: determined by users to select centers in the decision graph; δu: determined by users to select centers in the decision graph</td></tr><tr><td>DPC-KNN</td><td>pct: a relative ratio to determine the cut-off distance dc; d: number of principal components to be selected after applying PCA; k: number of nearest neighbors</td></tr><tr><td>McDPC</td><td>γ: parameter used to perform ρ-cut; θ: threshold used to perform δ-cut; λ: threshold used to identify micro-clusters; pct: a relative ratio to determine the cut-off distance dc</td></tr><tr><td>SNNDPC</td><td>k: number of nearest neighbors; NC: number of centers</td></tr><tr><td>FKNN-DPC</td><td>k: number of nearest neighbors; NC: number of centers</td></tr><tr><td>VDPC (ours)</td><td>pct: a relative ratio to determine the cut-off distance dc; δt: parameter used to select representatives</td></tr></table>
350
+
351
+ # 4.3. Experiments on Synthetic Datasets
352
+
353
+ Table 4 reports the ARI and NMI values on the first six synthetic datasets (T58 and T710 are excluded because they have no ground-truth labels), together with the corresponding parameter values of each algorithm.
354
+
355
+ According to our proposed intrinsic structure analysis method, datasets Flame, Aggregation, R15, Jain and T58 have one density level, while datasets Compound, Pathbased and T710 have two density levels. Regardless of whether a dataset has one or two density levels, VDPC always achieves the best results, especially on the challenging variational-density datasets. DPC is only able to obtain the best results on datasets with a single density level. DBSCAN does not achieve the best results, largely due to the influence of connected points. McDPC achieves the best results on all datasets except Compound, because it cannot identify clusters with variational density in the higher density level. The other three state-of-the-art extended DPC methods (namely DPC-KNN, SNNDPC and FKNN-DPC) only achieve the best results on a few synthetic datasets; noticeably, they do not perform well on datasets with variational density.
356
+
357
+ # 4.4. Experiments on UCI Real-world Datasets
358
+
359
+ The clustering results of six UCI datasets, namely German, Yeast, Pima, Heart, Spambase and Immunotherapy, are shown in Table 5. VDPC obtains the best ARI values on all six datasets and the best NMI values on all but Heart. The clustering results are encouraging because they suggest that VDPC is capable of handling real-world datasets.
360
+
361
+ Table 4: Comparison of VDPC with six clustering algorithms on synthetic datasets
362
+
363
+ <table><tr><td>Algorithms</td><td>Par</td><td>Val</td><td>ARI</td><td>NMI</td><td>Val</td><td>ARI</td><td>NMI</td></tr><tr><td colspan="5">Dataset Flame</td><td colspan="3">Dataset Aggregation</td></tr><tr><td>DBSCAN</td><td>Eps/MinPts</td><td>1/6</td><td>0.9280</td><td>0.8583</td><td>1.21/6</td><td>0.9828</td><td>0.9749</td></tr><tr><td>DPC</td><td>pct</td><td>5</td><td>1.0000</td><td>1.0000</td><td>4</td><td>1.0000</td><td>1.0000</td></tr><tr><td>DPC-KNN</td><td>pct/d/k</td><td>1/2/3</td><td>1.0000</td><td>1.0000</td><td>0.5/2/3</td><td>0.9957</td><td>0.9884</td></tr><tr><td>McDPC</td><td>γ/θ/λ/pct</td><td>2/0.001/3/4</td><td>1.0000</td><td>1.0000</td><td>0.5/0.1/2.9/4</td><td>1.0000</td><td>1.0000</td></tr><tr><td>SNNDPC</td><td>k/NC</td><td>5/2</td><td>0.9502</td><td>0.8994</td><td>15/7</td><td>0.9594</td><td>0.9555</td></tr><tr><td>FKNN-DPC</td><td>k/NC</td><td>6/2</td><td>1.0000</td><td>1.0000</td><td>20/7</td><td>0.7150</td><td>0.8618</td></tr><tr><td>VDPC</td><td>pct/δt</td><td>5/5.5</td><td>1.0000</td><td>1.0000</td><td>4/2.9</td><td>1.0000</td><td>1.0000</td></tr><tr><td colspan="5">Dataset R15</td><td colspan="3">Dataset Compound</td></tr><tr><td>DBSCAN</td><td>Eps/MinPts</td><td>0.3/3</td><td>0.9018</td><td>0.8942</td><td>1/5</td><td>0.9103</td><td>0.8774</td></tr><tr><td>DPC</td><td>pct</td><td>5</td><td>0.9928</td><td>0.9942</td><td>5</td><td>0.6368</td><td>0.5263</td></tr><tr><td>DPC-KNN</td><td>pct/d/k</td><td>0.5/2/3</td><td>0.9928</td><td>0.9942</td><td>5/2/8</td><td>0.5448</td><td>0.7423</td></tr><tr><td>McDPC</td><td>γ/θ/λ/pct</td><td>1/0.001/1.02/0.1</td><td>0.9228</td><td>0.9765</td><td>0.5/0.01/3/1</td><td>0.6074</td><td>0.7781</td></tr><tr><td>SNNDPC</td><td>k/NC</td><td>10/15</td><td>0.9928</td><td>0.9942</td><td>4/6</td><td>0.8629</td><td>0.9120</td></tr><tr><td>FKNN-DPC</td><td>k/NC</td><td>27/15</td><td>0.9892</td><td>0.9913</td><td>15/6</td><td>0.8229</td><td>0.8362</td></tr><tr><td>VDPC</td><td>pct/δt</td><td>5/1</td><td>0.9928</td><td>0.9942</td><td>1.9/1.39</td><td>1.0000</td><td>1.0000</td></tr><tr><td colspan="5">Dataset Jain</td><td colspan="3">Dataset Pathbased</td></tr><tr><td>DBSCAN</td><td>Eps/MinPts</td><td>2.9/20</td><td>1.0000</td><td>1.0000</td><td>1/4</td><td>0.6288</td><td>0.4577</td></tr><tr><td>DPC</td><td>pct</td><td>40</td><td>1.0000</td><td>1.0000</td><td>5</td><td>0.6600</td><td>0.4572</td></tr><tr><td>DPC-KNN</td><td>pct/d/k</td><td>3/2/15</td><td>0.5692</td><td>0.5420</td><td>0.5/2/5</td><td>0.5448</td><td>0.7423</td></tr><tr><td>McDPC</td><td>γ/θ/λ/pct</td><td>0.1/2/3.35/2</td><td>1.0000</td><td>1.0000</td><td>0.12/0.8/3.5/0.5</td><td>1.0000</td><td>1.0000</td></tr><tr><td>SNNDPC</td><td>k/NC</td><td>12/2</td><td>1.0000</td><td>1.0000</td><td>9/3</td><td>0.9294</td><td>0.9013</td></tr><tr><td>FKNN-DPC</td><td>k/NC</td><td>10/2</td><td>0.0562</td><td>0.2330</td><td>15/7</td><td>0.4729</td><td>0.5783</td></tr><tr><td>VDPC</td><td>pct/δt</td><td>50/5.5</td><td>1.0000</td><td>1.0000</td><td>0.4/3.5</td><td>1.0000</td><td>1.0000</td></tr></table>
364
+
365
+ The best results are highlighted in boldface.
366
+
367
+ # 4.5. Experiments on Image Datasets
368
+
369
+ Image clustering is a challenging machine learning task, which can effectively assess the performance of clustering algorithms. We select the following six popular image datasets for evaluation (a sketch of the corresponding input representations follows the list):
370
+
371
+ (i) COIL-20. It is a popular greyscale image dataset from the Columbia Object Image Library, which comprises 1440 images. Each image has 128x128 pixels, which are viewed as 16,384 features. We take the matrix of size 1440x16384 as input data.
372
+ (ii) Olivetti Faces and Olivetti Faces-100. The Olivetti Faces dataset comes from AT&T Labs and comprises the facial images of 40 different persons (10 images each). Olivetti Faces-100 refers to the first 100 facial images of Olivetti Faces. Each image has 64x64 pixels, which are viewed as 4096 features. We use the image similarity measure CW-SSIM [33] to compute new representations of the original images in the same way as in [5]. Finally, we take the similarity matrix of size 400x400 as input data for dataset Olivetti Faces and the similarity matrix of size 100x100 for dataset Olivetti Faces-100.
373
+ (iii) UMist Faces. It is a popular greyscale facial image dataset, which comprises 575 images collected from 20 persons. Each image has 112x92 pixels, which are viewed as 10304 features. We take the data matrix of size $575 \times 10304$ as input.
374
+ (iv) mini-Corel5K. It comprises 100 images selected from the popular semantic RGB image dataset Corel5K, which has 50 semantic topics with 100 images each. The dataset mini-Corel5K has 10 semantic topics, where each topic has 10 images. Each image has $192 \times 128$ pixels, as listed in Table 1. We take the similarity matrix of size $100 \times 100$ as input.
375
+ (v) mini-Cifar10. CIFAR-10 is a small RGB image dataset for recognizing universal objects, with 10 classes. It contains 50,000 training images and 10,000 test images; we select 1,000 images (100 from each class) from the test set to form the mini-Cifar10 dataset.
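+ 
+ As mentioned above, the image datasets enter the algorithms in one of two representations: a flattened pixel matrix (e.g. COIL-20, UMist Faces, mini-Cifar10) or a pairwise CW-SSIM similarity matrix (e.g. Olivetti Faces, mini-Corel5K). A minimal sketch of both constructions follows; `cw_ssim` is assumed to be an available implementation of CW-SSIM [33] and is not provided here:
+ 
+ ```python
+ # Build the two kinds of input matrices used for the image datasets.
+ import numpy as np
+ 
+ def pixel_matrix(images):
+     """Flatten n greyscale images of shape (h, w) into an (n, h*w) matrix,
+     e.g. (1440, 16384) for COIL-20."""
+     return np.stack([np.asarray(img, dtype=float).ravel() for img in images])
+ 
+ def similarity_matrix(images, cw_ssim):
+     """Pairwise similarities, e.g. (400, 400) for Olivetti Faces.
+     cw_ssim(a, b) is an assumed external CW-SSIM implementation [33]."""
+     n = len(images)
+     S = np.zeros((n, n))
+     for i in range(n):
+         for j in range(i, n):
+             S[i, j] = S[j, i] = cw_ssim(images[i], images[j])
+     return S
+ ```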
376
+
377
+ Table 5: Comparison of VDPC with six clustering algorithms on UCI datasets
378
+
379
+ <table><tr><td>Algorithms</td><td>Par</td><td>Val</td><td>ARI</td><td>NMI</td><td>Val</td><td>ARI</td><td>NMI</td></tr><tr><td colspan="5">Dataset German</td><td colspan="3">Dataset Yeast</td></tr><tr><td>DBSCAN</td><td>Eps/MinPts</td><td>8.1/2</td><td>0.0810</td><td>0.0056</td><td>0.2/2</td><td>0.0031</td><td>0.0157</td></tr><tr><td>DPC</td><td>pct</td><td>1</td><td>0.0042</td><td>0.0027</td><td>1</td><td>0.0014</td><td>0.0273</td></tr><tr><td>DPC-KNN</td><td>pct/d/k</td><td>1/2/3</td><td>0.0542</td><td>0.0133</td><td>1/2/3</td><td>0.0034</td><td>0.0161</td></tr><tr><td>McDPC</td><td>γ/θ/λ/pct</td><td>2/0.01/14/4</td><td>0.0551</td><td>0.0436</td><td>2/0.3/0.1/1</td><td>0.0211</td><td>0.0265</td></tr><tr><td>SNNDPC</td><td>k/NC</td><td>25/2</td><td>0.0531</td><td>0.0146</td><td>10/8</td><td>0.0142</td><td>0.0213</td></tr><tr><td>FKNN-DPC</td><td>k/NC</td><td>16/2</td><td>0.0446</td><td>0.0094</td><td>7/8</td><td>0.0112</td><td>0.0178</td></tr><tr><td>VDPC</td><td>pct/δt</td><td>0.5/10</td><td>0.0856</td><td>0.0807</td><td>1/0.1</td><td>0.0273</td><td>0.2032</td></tr><tr><td colspan="5">Dataset Pima</td><td colspan="3">Dataset Heart</td></tr><tr><td>DBSCAN</td><td>Eps/MinPts</td><td>0.5/2</td><td>0.0023</td><td>0.0042</td><td>0.8/3</td><td>0.0818</td><td>0.1292</td></tr><tr><td>DPC</td><td>pct</td><td>1</td><td>0.0218</td><td>0.0058</td><td>2</td><td>0.1723</td><td>0.1537</td></tr><tr><td>DPC-KNN</td><td>pct/d/k</td><td>0.5/7/3</td><td>0.0131</td><td>0.0035</td><td>0.2/2/2</td><td>0.0628</td><td>0.1180</td></tr><tr><td>McDPC</td><td>γ/θ/λ/pct</td><td>0.02/1/0.6/5</td><td>0.1344</td><td>0.0644</td><td>2/1/1.6/0.2</td><td>0.1723</td><td>0.1507</td></tr><tr><td>SNNDPC</td><td>k/NC</td><td>7/2</td><td>0.0119</td><td>0.0029</td><td>22/2</td><td>0.1508</td><td>0.1297</td></tr><tr><td>FKNN-DPC</td><td>k/NC</td><td>7/2</td><td>0.0453</td><td>0.0175</td><td>16/2</td><td>0.0715</td><td>0.0860</td></tr><tr><td>VDPC</td><td>pct/δt</td><td>1/0.19</td><td>0.1701</td><td>0.1636</td><td>0.01/1.6</td><td>0.1897</td><td>0.1518</td></tr><tr><td colspan="5">Dataset Spambase</td><td colspan="3">Dataset Immunotherapy</td></tr><tr><td>DBSCAN</td><td>Eps/MinPts</td><td>9000/2</td><td>0.0000</td><td>0.0000</td><td>400/2</td><td>0.0000</td><td>0.0000</td></tr><tr><td>DPC</td><td>pct</td><td>1</td><td>0.0643</td><td>0.0695</td><td>1</td><td>0.0212</td><td>0.1936</td></tr><tr><td>DPC-KNN</td><td>pct/d/k</td><td>1/3/2</td><td>0.0155</td><td>0.0742</td><td>0.5/2/5</td><td>0.0046</td><td>0.2906</td></tr><tr><td>McDPC</td><td>γ/θ/λ/pct</td><td>3/2/10/2</td><td>0.1296</td><td>0.0801</td><td>1/1/25/1</td><td>0.0500</td><td>0.1997</td></tr><tr><td>SNNDPC</td><td>k/NC</td><td>17/2</td><td>-0.0030</td><td>0.0063</td><td>23/12</td><td>0.0042</td><td>0.1165</td></tr><tr><td>FKNN-DPC</td><td>k/NC</td><td>15/2</td><td>0.0000</td><td>0.0000</td><td>20/12</td><td>0.0000</td><td>0.0000</td></tr><tr><td>VDPC</td><td>pct/δt</td><td>2/20</td><td>0.1399</td><td>0.1013</td><td>1/30</td><td>0.1126</td><td>0.3657</td></tr></table>
380
+
381
+ The best results are highlighted in boldface.
382
+
383
+ The first four image datasets have simple backgrounds and the last two have complex backgrounds. All the clustering algorithms take the same input matrix. As shown in Table 6, VDPC achieves the best clustering results on all six image datasets. The clustering results on the Olivetti Faces-100 dataset are shown in Figure 11(b) for illustration.
384
+
385
+ Table 6: Comparison of VDPC with six clustering algorithms on image datasets
386
+
387
+ <table><tr><td>Algorithms</td><td>Par</td><td>Val</td><td>ARI</td><td>NMI</td><td>Val</td><td>ARI</td><td>NMI</td></tr><tr><td colspan="5">Dataset COIL-20</td><td colspan="3">Dataset Olivetti Faces-100</td></tr><tr><td>DBSCAN</td><td>Eps/MinPts</td><td>9000/2</td><td>0.0000</td><td>0.0028</td><td>0.85/2</td><td>0.5918</td><td>0.7979</td></tr><tr><td>DPC</td><td>pct</td><td>1</td><td>0.4809</td><td>0.7633</td><td>3</td><td>0.6023</td><td>0.7802</td></tr><tr><td>DPC-KNN</td><td>pct/d/k</td><td>1/3/5</td><td>0.1862</td><td>0.4902</td><td>1/20/5</td><td>0.6790</td><td>0.8263</td></tr><tr><td>McDPC</td><td>γ/θ/λ/pct</td><td>1/10/7800/1</td><td>0.2090</td><td>0.5345</td><td>1/0.1/0.83/100</td><td>0.5525</td><td>0.7788</td></tr><tr><td>SNNDPC</td><td>k/NC</td><td>14/20</td><td>0.6232</td><td>0.8546</td><td>5/10</td><td>0.6533</td><td>0.7983</td></tr><tr><td>FKNN-DPC</td><td>k/NC</td><td>12/20</td><td>0.2890</td><td>0.4780</td><td>6/10</td><td>0.3408</td><td>0.6621</td></tr><tr><td>VDPC</td><td>pct/δt</td><td>1/6300</td><td>0.6306</td><td>0.8549</td><td>1/0.827</td><td>0.7475</td><td>0.8596</td></tr><tr><td colspan="5">Dataset Olivetti Faces</td><td colspan="3">Dataset UMist Faces</td></tr><tr><td>DBSCAN</td><td>Eps/MinPts</td><td>2600/3</td><td>0.0000</td><td>0.0000</td><td>30/6</td><td>0.0000</td><td>0.0000</td></tr><tr><td>DPC</td><td>pct</td><td>1</td><td>0.4257</td><td>0.8156</td><td>100</td><td>0.3685</td><td>0.6644</td></tr><tr><td>DPC-KNN</td><td>pct/d/k</td><td>10/3/5</td><td>0.0833</td><td>0.7412</td><td>2/6/6</td><td>0.3321</td><td>0.6848</td></tr><tr><td>McDPC</td><td>γ/θ/λ/pct</td><td>2/0.1/2936/100</td><td>0.4116</td><td>0.7016</td><td>2/1/1.6/0.2</td><td>0.1723</td><td>0.1507</td></tr><tr><td>SNNDPC</td><td>k/NC</td><td>5/40</td><td>0.3136</td><td>0.7622</td><td>8/20</td><td>0.3600</td><td>0.6649</td></tr><tr><td>FKNN-DPC</td><td>k/NC</td><td>4/40</td><td>0.2677</td><td>0.6742</td><td>9/20</td><td>0.2722</td><td>0.5072</td></tr><tr><td>VDPC</td><td>pct/δt</td><td>10/1583</td><td>0.4702</td><td>0.8496</td><td>100/2936</td><td>0.4525</td><td>0.7296</td></tr><tr><td colspan="5">Dataset mini-Corel5K</td><td colspan="3">Dataset mini-Cifar10</td></tr><tr><td>DBSCAN</td><td>Eps/MinPts</td><td>3/2</td><td>0.0000</td><td>0.0000</td><td>4000/2</td><td>0.0000</td><td>0.0000</td></tr><tr><td>DPC</td><td>pct</td><td>90</td><td>0.1609</td><td>0.5312</td><td>1</td><td>0.0187</td><td>0.4089</td></tr><tr><td>DPC-KNN</td><td>pct/d/k</td><td>90/5/5</td><td>0.1505</td><td>0.5096</td><td>1/2/3</td><td>0.0083</td><td>0.2120</td></tr><tr><td>McDPC</td><td>γ/θ/λ/pct</td><td>1/0.1/0.83/10</td><td>0.1296</td><td>0.0801</td><td>1/0.1/1370/10</td><td>0.0193</td><td>0.3382</td></tr><tr><td>SNNDPC</td><td>k/NC</td><td>7/10</td><td>0.1640</td><td>0.3664</td><td>26/10</td><td>0.0170</td><td>0.0527</td></tr><tr><td>FKNN-DPC</td><td>k/NC</td><td>4/10</td><td>0.1225</td><td>0.3664</td><td>12/20</td><td>0.0164</td><td>0.0368</td></tr><tr><td>VDPC</td><td>pct/δt</td><td>90/0.83</td><td>0.1754</td><td>0.5621</td><td>10/1370</td><td>0.0194</td><td>0.4166</td></tr></table>
388
+
389
+ The best results are highlighted in boldface.
390
+
391
+ ![](images/2619863e75cded0600d4a04e3963407f8da22216d44bfe923ab256252fd2fbbd.jpg)
392
+ (a)
393
+
394
+ ![](images/beee6173dda337b0e36902b7059dc88bfaf9869613df02cd4848c265bce9676d.jpg)
395
+ (b)
396
+ Figure 11: Clustering results on dataset Olivetti Faces-100. (a) Olivetti Faces-100 clustering results generated by DPC. (b) Olivetti Faces-100 clustering results generated by VDPC.
397
+
398
+ # 5. Conclusion and Future Work
399
+
400
+ In this paper, we propose an effective variational density peak clustering algorithm called VDPC, which has only two user-defined parameters. VDPC combines the advantages of two well-known methods, DBSCAN and DPC, to perform the clustering procedure autonomously according to the proposed notion of local density levels. We use 20 datasets to evaluate the performance of VDPC; the experimental results show that VDPC outperforms two classical clustering algorithms (DBSCAN and DPC) and four state-of-the-art extended DPC algorithms in almost all cases.
401
+
402
+ In future work, we will seek a heuristic method to determine the values of the two user-defined parameters $pct$ and $\delta_t$ in VDPC, which would make it easier for users to apply VDPC in different fields.
403
+
404
+ # 6. Acknowledgment
405
+
406
+ This research is supported by the Innovation and Entrepreneurship Program of Jiangsu Province (Grant No. JSSCBS20211048) and Jiangsu Provincial Universities of Natural Science General Program (Grant No. 21KJB520021).
407
+
408
+ # Appendix A Effect of Different $k$ Values in aSNNC on Clustering Results
409
+
410
+ Two widely adopted settings of $k$ are $k = \lceil \sqrt{nl} \rceil$ and $k = \lceil \ln (nl) \rceil$. For the two typical variational-density datasets Compound and Pathbased, VDPC obtains the best results when $k = \lceil \sqrt{nl} \rceil$, as shown in Table 7. Thus, we set $k$ in aSNNC to the heuristic value $\lceil \sqrt{nl} \rceil$.
411
+
412
+ Table 7: Clustering results (ARI) of applying different $k$ values in aSNNC
413
+
414
+ <table><tr><td>Datasets</td><td>Compound</td><td>Pathbased</td></tr><tr><td>k = ⌈√nl⌉</td><td>1.0000</td><td>1.0000</td></tr><tr><td>k = ⌈ln(nl)⌉</td><td>0.7964</td><td>1.0000</td></tr></table>
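+ 
+ The two candidate settings compared in Table 7 are one-liners; in the sketch below, $nl$ is taken to be the sample count from which $k$ is derived, which is our reading of the notation:
+ 
+ ```python
+ # The two heuristic settings of k in aSNNC compared in Table 7.
+ import math
+ 
+ def k_sqrt(nl):  # k = ceil(sqrt(nl)); the setting adopted in VDPC
+     return math.ceil(math.sqrt(nl))
+ 
+ def k_log(nl):   # k = ceil(ln(nl)); the rejected alternative
+     return math.ceil(math.log(nl))
+ 
+ print(k_sqrt(399), k_log(399))  # for 399 samples (Compound): 20 and 6
+ ```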
415
+
416
+ # Appendix B Effect of Different Eps Values in aDBSCAN on Clustering Results
417
+
418
+ For the two representative variational-density datasets Compound and Pathbased, when we set $Eps = sim(\lceil \sqrt{|C_{x_{low}}|} \rceil)$, VDPC obtains the best clustering results, as shown in Table 8. Thus, we set $Eps$ in aDBSCAN to the heuristic value $sim(\lceil \sqrt{|C_{x_{low}}|} \rceil)$.
419
+
420
+ Table 8: Clustering results (ARI) of applying different $Eps$ values in aDBSCAN
421
+
422
+ <table><tr><td>Datasets</td><td>Compound</td><td>Pathbased</td></tr><tr><td>Eps = sim(⌈√|Cxlow|⌉)</td><td>1.0000</td><td>1.0000</td></tr><tr><td>Eps = sim(⌈ln(|Cxlow|)⌉)</td><td>NaN</td><td>1.0000</td></tr></table>
423
+
424
+ $NaN$: not a number.
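+ 
+ A sketch of this heuristic follows. We read $sim(j)$ as the $j$-th smallest pairwise distance among the points of $C_{x_{low}}$; this interpretation is an assumption based on the notation used here, with $sim(\cdot)$ defined in the main text:
+ 
+ ```python
+ # Heuristic Eps for aDBSCAN: Eps = sim(ceil(sqrt(|C_x_low|))), where sim(j)
+ # is read here as the j-th smallest pairwise distance within C_x_low
+ # (an assumption; see the definition of sim in the main text).
+ import math
+ import numpy as np
+ from scipy.spatial.distance import pdist
+ 
+ def eps_heuristic(C_x_low):
+     j = math.ceil(math.sqrt(len(C_x_low)))  # j = ceil(sqrt(|C_x_low|))
+     distances = np.sort(pdist(np.asarray(C_x_low, dtype=float)))
+     return distances[j - 1]                 # the j-th smallest distance
+ ```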
425
+
426
+ # Appendix C Applying aDBSCAN and aSNNC in Different Density Levels
427
+
428
+ To further validate our intuition that aSNNC works well for the lower density level $l_{1}$ while aDBSCAN works well for $l_{p}$, we conduct experiments for all the algorithm-density-level combinations and show the results in Table 9. For the two representative variational-density datasets Compound and Pathbased, the proposed combination of aSNNC for $l_{1}$ and aDBSCAN for $l_{p}$ obtains the best results.
429
+
430
+ Table 9: Clustering results (ARI) of applying aDBSCAN and aSNNC in different density levels
431
+
432
+ <table><tr><td>Datasets</td><td>Compound</td><td>Pathbased</td></tr><tr><td>aSNNC (l1) + aSNNC (lp)</td><td>0.5208</td><td>1.0000</td></tr><tr><td>aDBSCAN (l1) + aDBSCAN (lp)</td><td>0.1914</td><td>0.0058</td></tr><tr><td>aDBSCAN (l1) + aSNNC (lp)</td><td>0.4415</td><td>0.0058</td></tr><tr><td>aSNNC (l1) + aDBSCAN (lp) (used in VDPC)</td><td>1.0000</td><td>1.0000</td></tr></table>
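+ 
+ The winning row of Table 9 amounts to a simple dispatch over density levels. A sketch follows, with `asnnc` and `adbscan` standing in for the adapted algorithms described earlier in the paper; we assume every level above $l_1$ is handled by aDBSCAN:
+ 
+ ```python
+ # Per-level dispatch used in VDPC (last row of Table 9): aSNNC on the
+ # lowest density level l1, aDBSCAN on the higher level(s).
+ def cluster_by_level(levels, asnnc, adbscan):
+     """levels: point sets ordered from the lowest (l1) to the highest (lp);
+     asnnc/adbscan: callables mapping a point set to cluster labels."""
+     return [asnnc(pts) if i == 0 else adbscan(pts)
+             for i, pts in enumerate(levels)]
+ ```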
433
+
434
+ # References
435
+
436
+ [1] C. Atilgan, B. T. Tezel, E. Nasiboglu, Efficient implementation and parallelization of fuzzy density based clustering, Information Sciences 575 (2021) 454-467.
437
+ [2] M. d'Errico, E. Facco, A. Laio, A. Rodriguez, Automatic topography of high-dimensional data sets by non-parametric density peak clustering, Information Sciences 560 (2021) 476-492.
438
+ [3] L. Bai, X. Cheng, J. Liang, H. Shen, Y. Guo, Fast density clustering strategies based on the k-means algorithm, Pattern Recognition 71 (2017) 375-386.
439
+ [4] M. Ester, H. P. Kriegel, J. Sander, X. Xu, A density-based algorithm for discovering clusters in large spatial databases with noise, in: ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 1996, pp. 226-231.
440
+ [5] A. Rodriguez, A. Laio, Clustering by fast search and find of density peaks, Science 344 (2014) 1492-1496.
441
+ [6] Y. Wang, D. Wang, X. Zhang, W. Pang, C. Miao, A.-H. Tan, Y. Zhou, McDPC: Multi-center density peak clustering, Neural Computing and Applications (2020) 13465-13478.
442
+ [7] J. Hou, A. Zhang, N. Qi, Density peak clustering based on relative density relationship, Pattern Recognition 108 (2020) 107554.
443
+ [8] M. Abbas, A. El-Zoghabi, A. Shoukry, Denmune: Density peak based clustering using mutual nearest neighbors, Pattern Recognition 109 (2021) 107589.
444
+ [9] H. Chang, D. Y. Yeung, Robust path-based spectral clustering, Pattern Recognition 41 (1) (2008) 191-203.
445
+ [10] A. K. Patidar, J. Agrawal, N. Mishra, Analysis of different similarity measure functions and their impacts on shared nearest neighbor clustering approach, International Journal of Computer Applications 40 (16) (2012) 1-5.
446
+ [11] A. Mehta, O. Dikshit, Segmentation-based projected clustering of hyperspectral images using mutual nearest neighbour, IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing 10 (12) (2017) 5237-5244.
447
+ [12] F. Ros, S. Guillaume, Munec: a mutual neighbor-based clustering algorithm, Information Sciences 486 (2019) 148-170.
448
+ [13] Z. Li, Y. Tang, Comparative density peaks clustering, Expert Systems with Applications 95 (2018) 236-247.
449
+ [14] R. Liu, H. Wang, X. Yu, Shared-nearest-neighbor-based clustering by fast search and find of density peaks, Information Sciences 450 (2018) 200–226.
450
+ [15] J. Chen, P. Yu, A domain adaptive density clustering algorithm for data with varying density distribution, IEEE Transactions on Knowledge and Data Engineering 33 (2019) 2310-2321.
451
+ [16] X. Xu, S. Ding, L. Wang, Y. Wang, A robust density peaks clustering algorithm with density-sensitive similarity, Knowledge-Based Systems 200 (2020) 106028.
452
+ [17] Y. Wang, Y. Yang, Relative density-based clustering algorithm for identifying diverse density clusters effectively, Neural Computing and Applications (2021) 10141-10157.
453
+ [18] L. Yaohui, M. Zhengming, Y. Fang, Adaptive density peak clustering based on k-nearest neighbors with aggregating strategy, Knowledge-Based Systems 133 (2017) 208-220.
454
+ [19] F. Fang, L. Qiu, S. Yuan, Adaptive core fusion-based density peak clustering for complex data with arbitrary shapes and densities, Pattern Recognition 107 (2020) 107452.
455
+ [20] M. Du, S. Ding, H. Jia, Study on density peaks clustering based on k-nearest neighbors and principal component analysis, Knowledge-Based Systems 99 (2016) 135-145.
456
+ [21] J. Xie, H. Gao, W. Xie, X. Liu, P. W. Grant, Robust clustering by detecting density peaks and assigning points based on fuzzy weighted k-nearest neighbors, Information Sciences 354 (2016) 19-40.
457
+ [22] M. Chen, L. Li, B. Wang, J. Cheng, L. Pan, X. Chen, Effectively clustering by finding density backbone based-on kNN, Pattern Recognition 60 (2016) 486-498.
458
+ [23] J. Hou, A. Zhang, Enhancing density peak clustering via density normalization, IEEE Transactions on Industrial Informatics 16 (4) (2019) 2477-2485.
459
+ [24] Y. Chen, S. Tang, L. Zhou, C. Wang, J. Du, T. Wang, S. Pei, Decentralized clustering by finding loose and distributed density cores, Information Sciences 433 (2018) 510-526.
460
+ [25] J. Xie, Z.-Y. Xiong, Y.-F. Zhang, Y. Feng, J. Ma, Density core-based clustering algorithm with dynamic scanning radius, Knowledge-Based Systems 142 (2018) 58-70.
461
+ [26] D. Huang, C.-D. Wang, J.-S. Wu, J.-H. Lai, C.-K. Kwoh, Ultra-scalable spectral clustering and ensemble clustering, IEEE Transactions on Knowledge and Data Engineering 32 (6) (2019) 1212–1226.
462
+ [27] C. Zahn, Graph-theoretical methods for detecting and describing gestalt clusters, IEEE Transactions on Computers 20 (1) (1971) 68-86.
463
+
464
+ [28] L. Fu, E. Medico, Flame, a novel fuzzy clustering method for the analysis of DNA microarray data, BMC Bioinformatics 8 (1) (2007) 1-15.
465
+ [29] I. E. Givoni, B. J. Frey, A binary variable model for affinity propagation, Neural Computation 21 (6) (2009) 1589-1600.
466
+ [30] C. J. Veenman, M. J. T. Reinders, E. Backer, A maximum variance cluster algorithm, IEEE Transactions on Pattern Analysis and Machine Intelligence 24 (9) (2002) 1273-1280.
467
+ [31] N. X. Vinh, J. Epps, J. Bailey, Information theoretic measures for clusterings comparison: Variants, properties, normalization and correction for chance, The Journal of Machine Learning Research 11 (2010) 2837-2854.
468
+ [32] P. A. Estevez, M. Tesmer, C. A. Perez, J. M. Zurada, Normalized mutual information feature selection, IEEE Transactions on Neural Networks 20 (2) (2009) 189-201.
469
+ [33] M. P. Sampat, Z. Wang, S. Gupta, A. C. Bovik, M. K. Markey, Complex wavelet structural similarity: A new image similarity index, IEEE Transactions on Image Processing 18 (11) (2009) 2385-2401.
2201.00xxx/2201.00641/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5bb09a4799ceeb8d6f6f3a77cde88d619e72e66a9a926ce359787f39c7878413
3
+ size 1302563
2201.00xxx/2201.00641/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11248/a74eab91-73ff-44e2-9768-88fdde72bd1e_content_list.json ADDED
@@ -0,0 +1,1094 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Electrical Load Forecasting Using Edge Computing and Federated Learning",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 81,
8
+ 63,
9
+ 916,
10
+ 131
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Afaf Taik and Soumaya Cherkaoui",
17
+ "bbox": [
18
+ 367,
19
+ 152,
20
+ 629,
21
+ 167
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "INTERLAB, Engineering Faculty, Université de Sherbrooke, Canada.",
28
+ "bbox": [
29
+ 267,
30
+ 167,
31
+ 733,
32
+ 183
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "{afaf.taik, soumaya.cherkaoui} $@$ usherbrooke.ca",
39
+ "bbox": [
40
+ 334,
41
+ 184,
42
+ 656,
43
+ 198
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Abstract-In the smart grid, huge amounts of consumption data are used to train deep learning models for applications such as load monitoring and demand response. However, these applications raise concerns regarding security and have high accuracy requirements. In one hand, the data used is privacy-sensitive. For instance, the fine-grained data collected by a smart meter at a consumer's home may reveal information on the appliances and thus the consumer's behaviour at home. On the other hand, the deep learning models require big data volumes with enough variety and to be trained adequately. In this paper, we evaluate the use of Edge computing and federated learning, a decentralized machine learning scheme that allows to increase the volume and diversity of data used to train the deep learning models without compromising privacy. This paper reports, to the best of our knowledge, the first use of federated learning for household load forecasting and achieves promising results. The simulations were done using Tensorflow Federated on the data from 200 houses from Texas, USA.",
50
+ "bbox": [
51
+ 73,
52
+ 229,
53
+ 488,
54
+ 455
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "Keywords—Federated Learning; Energy Load Forecasting; Edge Computing; Deep Neural Networks; LSTM; Smart Grid.",
61
+ "bbox": [
62
+ 73,
63
+ 455,
64
+ 488,
65
+ 481
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "I. INTRODUCTION",
72
+ "text_level": 1,
73
+ "bbox": [
74
+ 215,
75
+ 494,
76
+ 346,
77
+ 508
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "Load forecasting is an essential part of the development of the smart grid. Long-term load forecasting is deemed necessary for infrastructure planning, while mid-term and short-term load forecasting are key tasks in system operations [1]. Day-to-day operational efficiency of electrical power delivery, in particular, requires an accurate prediction of short-term load profiles, which is based on collecting and analysing large volumes of high-resolution data from households. However, individual short-term load forecasting (STLF) has been proven to be a challenging task because of profile volatility. In fact, the electrical load of a house has a high correlation to its residents' behaviour, which is too stochastic and often hard to predict [2, 3].",
84
+ "bbox": [
85
+ 73,
86
+ 513,
87
+ 488,
88
+ 710
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "Benchmarks for state-of-the-art methods [4, 5] have found that deep neural networks are a promising solution for the STLF problem at the household level, due to their ability to capture complex and non-linear patterns. Neural networks outperform other prediction methods such as Auto Regressive Integrated Moving Average (ARIMA)[6] and Support Vector Regression (SVR). Nevertheless, applying deep learning models alone will not lead to significant improvements, as models tend to suffer from overfitting [7]. An overfitted model is a model that learned the details of the training data including the noise, which affects its ability to generalize when applied to new data. To tackle this issue, it is recommended to increase the diversity and size of the used data by combining usage",
95
+ "bbox": [
96
+ 73,
97
+ 710,
98
+ 488,
99
+ 907
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "image",
105
+ "img_path": "images/29a086304edd72e9defb41b45bb0159b3abbc7de81d9722785dd0a8714a16bf6.jpg",
106
+ "image_caption": [
107
+ "Fig. 1: Iterative communications between clients and server in Federated Learning"
108
+ ],
109
+ "image_footnote": [],
110
+ "bbox": [
111
+ 535,
112
+ 232,
113
+ 885,
114
+ 369
115
+ ],
116
+ "page_idx": 0
117
+ },
118
+ {
119
+ "type": "text",
120
+ "text": "records from different households. Typically, proposed frameworks [8, 9] assume that all data records are transferred from smart meters to a centralized computational infrastructure through broadband networks to train models. Nevertheless, this assumption raises concerns related to privacy, since the load profiles reveal a lot of sensitive information, such as device usage and the household's occupancy. Sending such detailed data over networks makes it exposed to malicious interception and misuse.",
121
+ "bbox": [
122
+ 504,
123
+ 450,
124
+ 921,
125
+ 587
126
+ ],
127
+ "page_idx": 0
128
+ },
129
+ {
130
+ "type": "text",
131
+ "text": "To address privacy concerns while still increasing data records' volume and variety, a new on-device solution was recently proposed by the Machine Learning community: Federated Learning (FL) [10]. Federated Learning is a decentralized machine learning scheme, where each device participates in training a central model without sending any data. As illustrated in Fig.1, the server first initializes the model either arbitrarily or by using publicly available data. Then, the model is sent to a set of randomly selected devices (clients) for local training using their data. Each client sends to the server an update of the model's weights, which will be averaged and used to update the global model. This process will be repeated until the global model stabilizes.",
132
+ "bbox": [
133
+ 504,
134
+ 588,
135
+ 921,
136
+ 784
137
+ ],
138
+ "page_idx": 0
139
+ },
140
+ {
141
+ "type": "text",
142
+ "text": "The main purpose of this paper is to evaluate the use of Edge computing, together with the Federated Learning approach in the STLF challenge for electricity in households. Edge computing refers to data processing at the edge of a network as opposed to cloud or remote server processing. We use Long-short Term Memory (LSTM) [11], a deep neural network for forecasting time series, which uses previous observations of the house's electrical load to predict future",
143
+ "bbox": [
144
+ 504,
145
+ 786,
146
+ 921,
147
+ 907
148
+ ],
149
+ "page_idx": 0
150
+ },
151
+ {
152
+ "type": "aside_text",
153
+ "text": "arXiv:2201.11248v1 [cs.CE] 27 Jan 2022",
154
+ "bbox": [
155
+ 22,
156
+ 246,
157
+ 57,
158
+ 681
159
+ ],
160
+ "page_idx": 0
161
+ },
162
+ {
163
+ "type": "text",
164
+ "text": "ones. We study a group of houses that have similar properties (geographical location, type of building), on a short period of time to avoid the weather's fluctuations and seasonality impact. Federated learning is performed on houses grid Edge equipment. Edge equipment is usually present at the end of the electrical distribution system as a smart interface between the customer and the electric power supply, be it a smart meter or a more sophisticated equipment. Our contributions in this work can be summarized as follows: (1) We propose an enabling architecture for FL using Edge equipment in the smart grid; (2) We evaluate the potential gain of FL in terms of accuracy through simulations; and (3) we evaluate the potential network load gain through numerical results. To these contributions, we add the gain in privacy leveraged by decentralization and Edge computing.",
165
+ "bbox": [
166
+ 73,
167
+ 61,
168
+ 491,
169
+ 289
170
+ ],
171
+ "page_idx": 1
172
+ },
173
+ {
174
+ "type": "text",
175
+ "text": "The remainder of this paper is structured as follows: Section II discusses related works focusing on load prediction and privacy. In Section III, we define the proposed approach and used methods. Section IV introduces the simulations and numerical results. Then in Section V we discuss the limitations and future work. Section VI concludes the paper.",
176
+ "bbox": [
177
+ 73,
178
+ 289,
179
+ 490,
180
+ 381
181
+ ],
182
+ "page_idx": 1
183
+ },
184
+ {
185
+ "type": "text",
186
+ "text": "II. RELATED WORK",
187
+ "text_level": 1,
188
+ "bbox": [
189
+ 210,
190
+ 397,
191
+ 354,
192
+ 411
193
+ ],
194
+ "page_idx": 1
195
+ },
196
+ {
197
+ "type": "text",
198
+ "text": "Many recent research works used deep neural networks, and particularly Long-short term memory (LSTM) to tackle the short-term load forecasting challenge. In fact, benchmarks have proved LSTM's potential compared to other methods[12, 13], yet the results do not match the level of desired exactitude in terms of Root Mean Square Error (RMSE) and Mean Average Percentage Error (MAPE). In order to improve forecasting accuracy, authors in [14] propose to use a variant of LSTM that is a sequence-to-sequence LSTM, which gives better results for one-minute resolution data, but no significant improvement for the one-hour resolution compared to standard LSTM. Furthermore, other authors [4] consider the problem of finding the best LSTM network to be a hyperparameter tuning problem, and use the genetic algorithm to this end. They state that finding the best combination of window size and number of hidden neurons in each layer remains a probabilistic task.",
199
+ "bbox": [
200
+ 73,
201
+ 422,
202
+ 490,
203
+ 664
204
+ ],
205
+ "page_idx": 1
206
+ },
207
+ {
208
+ "type": "text",
209
+ "text": "Some other works see that the problem is not simply an neural network architecture problem, and that ability of generalization of data-driven forecasting models is the real issue. In fact, many of the proposed models' accuracy drops when they are applied to new datasets [5]. Some works suggest to use complementary data about the weather [15] or records from the appliances [2]. While the weather has a real impact on the aggregated electrical consumption, the individual short-term load is more related to the occupants' behaviour[3, 16, 17]. However, collecting data from appliances around each house is an expensive and privacy-intrusive task.",
210
+ "bbox": [
211
+ 73,
212
+ 665,
213
+ 488,
214
+ 830
215
+ ],
216
+ "page_idx": 1
217
+ },
218
+ {
219
+ "type": "text",
220
+ "text": "Another approach to enrich the training data is grouping data from several customers. Authors in [8] use clustering to group users with similar profiles, hence reducing the variance of uncertainty within groups. Authors in [9] propose a pooling technique that increases data's diversity to overcome the",
221
+ "bbox": [
222
+ 73,
223
+ 830,
224
+ 491,
225
+ 907
226
+ ],
227
+ "page_idx": 1
228
+ },
229
+ {
230
+ "type": "image",
231
+ "img_path": "images/fd1f7e10da0f2dfb70fab787c457c2868322543764f7bb8b2084c83aa892f018.jpg",
232
+ "image_caption": [
233
+ "Fig. 2: Network components and roles"
234
+ ],
235
+ "image_footnote": [],
236
+ "bbox": [
237
+ 511,
238
+ 61,
239
+ 929,
240
+ 210
241
+ ],
242
+ "page_idx": 1
243
+ },
244
+ {
245
+ "type": "text",
246
+ "text": "overfitting problem. Nonetheless, these methods are heavily centralized and are prone to privacy-issues.",
247
+ "bbox": [
248
+ 504,
249
+ 265,
250
+ 919,
251
+ 295
252
+ ],
253
+ "page_idx": 1
254
+ },
255
+ {
256
+ "type": "text",
257
+ "text": "Fine-grained consumption data sent over networks is subject to many privacy threats when leaked through unauthorized interception or eavesdropping [18]. Many efforts were conducted to protect the users' identities in the smart grid. For instance, authors in [19] propose a clustering-based method where each group of users who are geographically close receive a common serial number. However this method makes it hard to treat each client individually because of the anonymity. Other works' focus is masking the consumption data, where data aggregation is the most popular method [20, 21], but it goes in opposite directions with STLF requirements.",
258
+ "bbox": [
259
+ 504,
260
+ 296,
261
+ 921,
262
+ 462
263
+ ],
264
+ "page_idx": 1
265
+ },
266
+ {
267
+ "type": "text",
268
+ "text": "In regards to user privacy and prediction accuracy, none of the aforementioned papers address both of these aspects. In the proposed work, we suggest to use the Edge Equipment that compose the Home Area Network (HAN) to carry out operations related to client selection and training neural network at the Edge following the federated learning scheme, allowing the use of data to train a global model without compromising the resident's privacy.",
269
+ "bbox": [
270
+ 504,
271
+ 462,
272
+ 921,
273
+ 583
274
+ ],
275
+ "page_idx": 1
276
+ },
277
+ {
278
+ "type": "text",
279
+ "text": "III. SYSTEM MODEL",
280
+ "text_level": 1,
281
+ "bbox": [
282
+ 637,
283
+ 595,
284
+ 790,
285
+ 609
286
+ ],
287
+ "page_idx": 1
288
+ },
289
+ {
290
+ "type": "text",
291
+ "text": "We propose the network architecture shown in Fig.2 with two main components: a Multi-access Edge Computing (MEC) server [22] and clients. Clients are houses with Edge equipment which is essentially composed of smart-meters and other devices in the HAN. FL is used to build a global LSTM-based model for STLF. The training rounds are orchestrated by the MEC server and executed by the clients using their own electrical consumption data. In this section, we explain in detail LSTM and how it comes to use in the forecasting, as well as FL and how it is used in our system model.",
292
+ "bbox": [
293
+ 504,
294
+ 617,
295
+ 921,
296
+ 768
297
+ ],
298
+ "page_idx": 1
299
+ },
300
+ {
301
+ "type": "text",
302
+ "text": "A. Time series forecasting using LSTM",
303
+ "text_level": 1,
304
+ "bbox": [
305
+ 504,
306
+ 781,
307
+ 774,
308
+ 796
309
+ ],
310
+ "page_idx": 1
311
+ },
312
+ {
313
+ "type": "text",
314
+ "text": "The prediction of the future electrical load in this work is achieved through the time series forecasting approach with LSTM. A time series refers to an ordered sequence of equally-spaced data points that represent the evolution of a specific variable over time. Time series forecasting is enabled through modeling the dependencies between the points of current data points and historical data, but the accuracy of the predictions",
315
+ "bbox": [
316
+ 504,
317
+ 801,
318
+ 921,
319
+ 907
320
+ ],
321
+ "page_idx": 1
322
+ },
323
+ {
324
+ "type": "text",
325
+ "text": "relies heavily on the chosen model and the quality of historical data points.",
326
+ "bbox": [
327
+ 73,
328
+ 61,
329
+ 491,
330
+ 90
331
+ ],
332
+ "page_idx": 2
333
+ },
334
+ {
335
+ "type": "text",
336
+ "text": "LSTM is a recurrent neural network (RNN) that is fundamentally different from traditional feedforward Neural networks, and more efficient than standard RNNs. Sequence learning is LSTM's Forte. It is able to establish the temporal correlations between previous data points and the current circumstances, while solving vanishing and exploding gradient problems that are common in RNNs. Gradient vanishing means that the norm of the gradient for long-term components gets smaller causing weights to never change at lower layers, while the gradient exploding refers to the opposite event [11]. This is achieved through its key components: the memory cell that is used to remember important states in the past, and the gates that regulate the flow of information. LSTM has three gates: the input gate, the output gate and the forget gate. They learn to reset the memory cell for unimportant features during the learning process. Almost all state of the art results in sequence learning are achieved with LSTM and its variants especially language translation and speech recognition. In the case of residential STLF, it is expected that the LSTM network would be able to form an abstraction of some residents' states from the provided consumption profile, maintain the memory of the states, and make a forecast of the future consumption based on the learnt information.",
337
+ "bbox": [
338
+ 73,
339
+ 90,
340
+ 491,
341
+ 439
342
+ ],
343
+ "page_idx": 2
344
+ },
345
+ {
346
+ "type": "text",
347
+ "text": "B. Federated Learning",
348
+ "text_level": 1,
349
+ "bbox": [
350
+ 73,
351
+ 449,
352
+ 235,
353
+ 465
354
+ ],
355
+ "page_idx": 2
356
+ },
357
+ {
358
+ "type": "text",
359
+ "text": "Federated learning is a form of machine learning where most of the training process is done in a distributed way among devices referred to as clients. It was first proposed and implemented by Google on keyboards of mobile devices for next word prediction [23]. This approach is ideal for many cases: 1) When data is privacy sensitive, 2) when data is large in size compared to model updates, 3) highly distributed systems where the number of devices is orders of magnitude larger than nodes in a data center, 4) in supervised training when labels can be inferred directly from the user. Federated learning has also proven to be very useful when datasets are unbalanced or non-identically distributed.",
360
+ "bbox": [
361
+ 73,
362
+ 468,
363
+ 490,
364
+ 648
365
+ ],
366
+ "page_idx": 2
367
+ },
368
+ {
369
+ "type": "text",
370
+ "text": "An iteration of federated learning goes as follows: First, a subset of clients is chosen and each of them receives the current model. In our case, clients are hosted at Edge equipment in houses (e.g. smart meters). Clients that were selected compute Stochastic Gradient Descent (SGD) updates on locally-stored data, then a server aggregates the client updates to build a new global model. The new model is sent back to another subset of clients. This process is repeated until the desired prediction accuracy is reached. The operations are detailed in Algorithm 1.",
371
+ "bbox": [
372
+ 73,
373
+ 650,
374
+ 488,
375
+ 800
376
+ ],
377
+ "page_idx": 2
378
+ },
379
+ {
380
+ "type": "text",
381
+ "text": "In order to combine the client updates, the server uses the FederatedAveraging algorithm [10]. First, the initial global model is initialized randomly or is pre-trained using publicly available data. In each training round $r$ , the server sends a global model $w_{r}$ to a subset $K$ of clients who have enough data records and whose consumption load varies enough to enrich the training data. This condition was added to ensure",
382
+ "bbox": [
383
+ 73,
384
+ 801,
385
+ 491,
386
+ 907
387
+ ],
388
+ "page_idx": 2
389
+ },
390
+ {
391
+ "type": "text",
392
+ "text": "that we have enough variation in terms of data points to give a representation of the occupants' regular consumption. Afterward, every client $k$ in the subset uses $n_k$ examples from its local data. In our case, the volume is related to how long the smart meter has been generating data and how many of it is saved locally. The used dataset is composed of sliding windows with a predetermined number of look-back steps. SGD is then used by each client $k$ to compute the average gradient $g_k$ , with a learning rate $\\eta$ . The updated models $w_k$ are sent to the server to be aggregated.",
393
+ "bbox": [
394
+ 504,
395
+ 61,
396
+ 921,
397
+ 214
398
+ ],
399
+ "page_idx": 2
400
+ },
401
+ {
402
+ "type": "code",
403
+ "sub_type": "algorithm",
404
+ "code_caption": [
405
+ "Algorithm 1 Federated Averaging Algorithm. $r_{max}$ is the maximum number of rounds. $\\eta$ is the learning rate and $N = \\sum_{k} n_{k}$"
406
+ ],
407
+ "code_body": "1: initialize the model in training round $r = 0$ \n2: \n3: while $r < r_{max}$ do \n4: Select subset $K$ of clients; \n5: \n6: for client $k$ in $K$ do \n7: if $\\sigma(\\text{monthlyload}) > \\text{threshold}$ then \n8: $k$ receives model $w_{r}$ ; \n9: \n10: $k$ computes average gradient $g_{k}$ with SGD; \n11: \n12: $k$ updates local model \n13: $w_{r+1}^{k} \\gets w_{r}^{k} - \\eta g_{k}$ ; \n14: \n15: $k$ sends updated model to server; \n16: end if \n17: end for \n18: server computes new global model using the equation $:w_{r+1} \\gets \\sum_{k=0}^{K} \\frac{n_{k}}{N} w_{r+1}^{k}$ ; \n19: start next round $r \\gets r + 1$ ; \n20: end while",
408
+ "bbox": [
409
+ 506,
410
+ 285,
411
+ 921,
412
+ 604
413
+ ],
414
+ "page_idx": 2
415
+ },
416
+ {
417
+ "type": "text",
418
+ "text": "However, the centralized model may not fit all the users' electrical consumption. A proposed solution to this problem is Personalization. Personalization is the focus of many applications that require understanding user behaviour and adapting to it. It consists on retraining the centralized model using user-specific data to build a personalized model for each user. This can be achieved through retraining the model for a small number of epochs locally using exclusively the user's data [24].",
419
+ "bbox": [
420
+ 504,
421
+ 619,
422
+ 921,
423
+ 755
424
+ ],
425
+ "page_idx": 2
426
+ },
427
+ {
428
+ "type": "text",
429
+ "text": "Federated learning has fewer privacy risks than centralized server storage, since even when data are anonymized, the users' identities are still at risk and can be discovered through reverse engineering. The model updates sent by each client are ephemeral and never stored on the server; weight updates are processed in memory and are discarded after aggregation. The federated learning procedure requires that the individual weight uploads will not be inspected or analyzed. This is still more-secure than server training because the network and the server cannot be entrusted with fine-grained user data. Some",
430
+ "bbox": [
431
+ 504,
432
+ 756,
433
+ 921,
434
+ 907
435
+ ],
436
+ "page_idx": 2
437
+ },
438
+ {
439
+ "type": "text",
440
+ "text": "data still have to be sent in an aggregated form for billing, but these data do not reveal many details. Techniques such as secure aggregation [25] and differential privacy[26] are being explored to enforce trust requirements.",
441
+ "bbox": [
442
+ 73,
443
+ 61,
444
+ 491,
445
+ 125
446
+ ],
447
+ "page_idx": 3
448
+ },
449
+ {
450
+ "type": "text",
451
+ "text": "C. Networking Load Gain",
452
+ "text_level": 1,
453
+ "bbox": [
454
+ 75,
455
+ 138,
456
+ 259,
457
+ 154
458
+ ],
459
+ "page_idx": 3
460
+ },
461
+ {
462
+ "type": "text",
463
+ "text": "To evaluate the gain in network load in FL contrast to centralized training, we first define the network load $L_{sC}$ for a server $s$ in centralized training in Eq. 1 and the network load in FL $L_{sF}$ in Eq. 2.",
464
+ "bbox": [
465
+ 73,
466
+ 162,
467
+ 490,
468
+ 223
469
+ ],
470
+ "page_idx": 3
471
+ },
472
+ {
473
+ "type": "text",
474
+ "text": "$S_{k - d}$ is the size of data sent by the client $k$ and $S_{m}$ is the size of the model. In the centralized training, $d_{k}$ is the number of hops between client $k$ and the server.",
475
+ "bbox": [
476
+ 73,
477
+ 222,
478
+ 491,
479
+ 268
480
+ ],
481
+ "page_idx": 3
482
+ },
483
+ {
484
+ "type": "equation",
485
+ "text": "\n$$\nL _ {s C} = \\sum_ {k = 1} ^ {N} S _ {k - d} \\times d _ {k} \\tag {1}\n$$\n",
486
+ "text_format": "latex",
487
+ "bbox": [
488
+ 204,
489
+ 277,
490
+ 488,
491
+ 321
492
+ ],
493
+ "page_idx": 3
494
+ },
495
+ {
496
+ "type": "equation",
497
+ "text": "\n$$\nL _ {s F} = S _ {m} \\times \\sum_ {r = 1} ^ {r _ {\\max }} \\sum_ {k = 1} ^ {K} d _ {k, r} \\tag {2}\n$$\n",
498
+ "text_format": "latex",
499
+ "bbox": [
500
+ 189,
501
+ 335,
502
+ 488,
503
+ 377
504
+ ],
505
+ "page_idx": 3
506
+ },
507
+ {
508
+ "type": "text",
509
+ "text": "where $d_{k,r}$ is the number of hops between the client $k$ selected in round $r$ and the server, and $K$ is the number of users in each subset.",
510
+ "bbox": [
511
+ 73,
512
+ 386,
513
+ 488,
514
+ 431
515
+ ],
516
+ "page_idx": 3
517
+ },
518
+ {
519
+ "type": "text",
520
+ "text": "Using Eq.1 and Eq.2, we define the gain in networking load as follows:",
521
+ "bbox": [
522
+ 73,
523
+ 431,
524
+ 488,
525
+ 460
526
+ ],
527
+ "page_idx": 3
528
+ },
529
+ {
530
+ "type": "equation",
531
+ "text": "\n$$\nG _ {s} = 1 - L _ {s F} / L _ {s C} \\tag {3}\n$$\n",
532
+ "text_format": "latex",
533
+ "bbox": [
534
+ 210,
535
+ 465,
536
+ 488,
537
+ 484
538
+ ],
539
+ "page_idx": 3
540
+ },
541
+ {
542
+ "type": "text",
543
+ "text": "IV. SIMULATION AND RESULTS",
544
+ "text_level": 1,
545
+ "bbox": [
546
+ 169,
547
+ 496,
548
+ 395,
549
+ 508
550
+ ],
551
+ "page_idx": 3
552
+ },
553
+ {
554
+ "type": "text",
555
+ "text": "A. Dataset Pre-Processing and Evaluation Method",
556
+ "text_level": 1,
557
+ "bbox": [
558
+ 73,
559
+ 521,
560
+ 423,
561
+ 537
562
+ ],
563
+ "page_idx": 3
564
+ },
565
+ {
566
+ "type": "text",
567
+ "text": "This research was conducted using data from Pecan Street Inc. Dataport site. Dataport contains unique, circuit-level electricity use data at one-minute to one-second intervals for approximately 800 homes in the United States, with Photovoltaics generation and Electrical Vehicles charging data for a subset of these homes [27]. We chose a subset of 200 clients who have similar properties from this dataset. It is composed of the same kind of houses (detached-family homes), located in the same area (Texas). The dataset is composed of records between January 1st 2019 and March 31st 2019 with a one-hour resolution data. The weather fluctuations in this period are low, so the seasonal factor can be ignored in this study. The data of each client is prepared to be ready for further analysis. First, we transform the data to be in a scale between 0 and 1. Then we transform the time series into sliding windows with look-backs of size 12 and a look-ahead of size 1. Finally, we split data into train and test subsets (90% for training and 10% for test). We also split the clients into two groups: 180 participating in the federated learning process, and 20 are left for further evaluation for how well the model can fit non-participating clients.",
568
+ "bbox": [
569
+ 73,
570
+ 542,
571
+ 488,
572
+ 861
573
+ ],
574
+ "page_idx": 3
575
+ },
576
+ {
577
+ "type": "text",
578
+ "text": "We use RMSE and MAPE to evaluate the model's performance with regard to the prediction error. RMSE allows us to quantify the error in terms of energy, while MAPE is a",
579
+ "bbox": [
580
+ 73,
581
+ 862,
582
+ 490,
583
+ 907
584
+ ],
585
+ "page_idx": 3
586
+ },
587
+ {
588
+ "type": "text",
589
+ "text": "percentage quantifying the size of the error relative to the real value. The expressions of RMSE and MAPE are as follows:",
590
+ "bbox": [
591
+ 504,
592
+ 61,
593
+ 921,
594
+ 92
595
+ ],
596
+ "page_idx": 3
597
+ },
598
+ {
599
+ "type": "equation",
600
+ "text": "\n$$\nR M S E = \\sqrt {\\frac {\\sum_ {i = 1} ^ {P} \\left(y _ {i} - \\hat {y} _ {i}\\right) ^ {2}}{N}} \\tag {4}\n$$\n",
601
+ "text_format": "latex",
602
+ "bbox": [
603
+ 609,
604
+ 99,
605
+ 921,
606
+ 138
607
+ ],
608
+ "page_idx": 3
609
+ },
610
+ {
611
+ "type": "equation",
612
+ "text": "\n$$\nM A P E = \\frac {100 \\%}{P} \\sum_ {i = 1} ^ {P} \\left| \\frac {y _ {i} - \\hat {y} _ {i}}{y _ {i}} \\right| \\tag{5}\n$$\n",
613
+ "text_format": "latex",
614
+ "bbox": [
615
+ 607,
616
+ 164,
617
+ 919,
618
+ 205
619
+ ],
620
+ "page_idx": 3
621
+ },
622
+ {
623
+ "type": "text",
624
+ "text": "where $\\hat{y}_i$ is the predicted value, $y_i$ is the actual value and $P$ is the number of predicted values.",
625
+ "bbox": [
626
+ 504,
627
+ 210,
628
+ 919,
629
+ 241
630
+ ],
631
+ "page_idx": 3
632
+ },
633
+ {
634
+ "type": "text",
635
+ "text": "B. Simulations setup",
636
+ "text_level": 1,
637
+ "bbox": [
638
+ 506,
639
+ 250,
640
+ 653,
641
+ 265
642
+ ],
643
+ "page_idx": 3
644
+ },
645
+ {
646
+ "type": "text",
647
+ "text": "The simulations were conducted on a laptop with a 2,2 GHz Intel i7 processor and 16GB of memory and NVIDIA GeForce GTX 1070 graphic card. We used Tensorflow Federated 0.4.0 with Tensorflow 1.13.1 backend.",
648
+ "bbox": [
649
+ 504,
650
+ 268,
651
+ 921,
652
+ 329
653
+ ],
654
+ "page_idx": 3
655
+ },
656
+ {
657
+ "type": "text",
658
+ "text": "Hyper-parameter tuning in deep learning models is important to obtain the best forecasting performance. However, in this work, we only focus on evaluating the federated learning paradigm. Previous work shows performance insensitivity to combinations of some layers and layer size, as long as we use multiple layers and that the number of hidden nodes is sufficiently large [28]. It was also suggested that very deep networks are prone to under-fitting and vanishing gradients. Following these rules, the initial model hyper-parameters (e.g. number of layers, and time steps to be considered) were chosen by random search on a randomly selected client's data. The retained model has two LSTM hidden layers composed of 200 neurons each. The loss function used is Mean squared error and the optimiser chosen is Adam. The model converges around the 20th epoch and thus we use close values for rounds and epochs.",
659
+ "bbox": [
660
+ 504,
661
+ 330,
662
+ 921,
663
+ 571
664
+ ],
665
+ "page_idx": 3
666
+ },
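
A sketch of the retained architecture in Keras syntax compatible with the TensorFlow 1.13 backend mentioned above: two LSTM hidden layers of 200 neurons each, mean squared error loss, and the Adam optimiser. Everything beyond those stated choices (the final Dense output layer, `return_sequences` wiring) is an assumption.

```python
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

def build_model(look_back=12):
    """Two stacked LSTM layers of 200 units, predicting the next hour's load."""
    model = Sequential([
        LSTM(200, return_sequences=True, input_shape=(look_back, 1)),
        LSTM(200),
        Dense(1),  # one-step-ahead forecast
    ])
    model.compile(loss="mean_squared_error", optimizer="adam")
    return model
```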
667
+ {
668
+ "type": "text",
669
+ "text": "C. Numerical Results",
670
+ "text_level": 1,
671
+ "bbox": [
672
+ 506,
673
+ 580,
674
+ 656,
675
+ 594
676
+ ],
677
+ "page_idx": 3
678
+ },
679
+ {
680
+ "type": "text",
681
+ "text": "1) Evaluated scenarios:",
682
+ "text_level": 1,
683
+ "bbox": [
684
+ 524,
685
+ 599,
686
+ 687,
687
+ 613
688
+ ],
689
+ "page_idx": 3
690
+ },
691
+ {
692
+ "type": "text",
693
+ "text": "The different scenarios that were evaluated are summarized in Table I. As explained in the previous section, in each round, only a subset of clients train the model. We modify the number of clients in the subset selected in each round, to see the effect of larger subsets. We also vary the number of epochs of local training. In all the scenarios, the federated learning algorithm was run for 20 rounds.",
694
+ "bbox": [
695
+ 504,
696
+ 614,
697
+ 921,
698
+ 720
699
+ ],
700
+ "page_idx": 3
701
+ },
702
+ {
703
+ "type": "table",
704
+ "img_path": "images/803d4209a646b3bda5191f408f0afdc242b421d72fa9d6b113533410c17a4e6c.jpg",
705
+ "table_caption": [
706
+ "TABLE I: Used scenarios"
707
+ ],
708
+ "table_footnote": [],
709
+ "table_body": "<table><tr><td>Scenarios</td><td>Clients in subset</td><td>Local Epochs</td></tr><tr><td>1</td><td>5</td><td>1</td></tr><tr><td>2</td><td>20</td><td>1</td></tr><tr><td>3</td><td>5</td><td>5</td></tr><tr><td>4</td><td>20</td><td>5</td></tr></table>",
710
+ "bbox": [
711
+ 573,
712
+ 753,
713
+ 851,
714
+ 816
715
+ ],
716
+ "page_idx": 3
717
+ },
718
+ {
719
+ "type": "text",
720
+ "text": "2) Results for global models:",
721
+ "text_level": 1,
722
+ "bbox": [
723
+ 522,
724
+ 832,
725
+ 723,
726
+ 845
727
+ ],
728
+ "page_idx": 3
729
+ },
730
+ {
731
+ "type": "text",
732
+ "text": "The evaluated scenarios resulted in global models that are obtained following the federated learning approach. These models are evaluated in terms of RMSE and MAPE as shown in Tables II and III. Null consumption values have been",
733
+ "bbox": [
734
+ 504,
735
+ 847,
736
+ 921,
737
+ 907
738
+ ],
739
+ "page_idx": 3
740
+ },
741
+ {
742
+ "type": "text",
743
+ "text": "disgarded when calculating MAPE. Table II summarizes the results for the participating clients in the different scenarios. In our case, the load forecast is on a granular level (single house) and on a short term (1 hour), therefore the values of MAPE achieved in Table II for various models are reasonable, and this level of accuracy is anticipated as similar values have been reported by previous works [28, 29]. These works also report that the forecasting accuracy tends to be low for short-term forecasting horizons. One of the most notable things we notice is that the global model fits some clients better than others when considering the fact that not all clients have similar profiles. We also notice that selecting a bigger number of clients in each round is preferable, but in cases where sending updates is more expensive in terms of networking, the difference can be compensated by using more local training epochs. The results are similar when applied to the set of clients who did not participate in the training.",
744
+ "bbox": [
745
+ 73,
746
+ 61,
747
+ 491,
748
+ 320
749
+ ],
750
+ "page_idx": 4
751
+ },
752
+ {
753
+ "type": "table",
754
+ "img_path": "images/262a0d1ecbc511fa5ca8d26bd8f1778d9000ff588eeb864fa0d5075189a0696e.jpg",
755
+ "table_caption": [
756
+ "TABLE II: Resulting RMSE and MAPE for global models in the considered scenarios for the 180 participating clients"
757
+ ],
758
+ "table_footnote": [],
759
+ "table_body": "<table><tr><td></td><td colspan=\"3\">RMSE</td><td colspan=\"3\">MAPE</td></tr><tr><td>Scenario</td><td>Min</td><td>Max</td><td>Mean</td><td>Min</td><td>Max</td><td>Mean</td></tr><tr><td>1</td><td>0.070</td><td>2.652</td><td>0.605</td><td>10.65%</td><td>83.35%</td><td>41.40%</td></tr><tr><td>2</td><td>0.045</td><td>2.55</td><td>0.578</td><td>9.18%</td><td>87.63%</td><td>38.39%</td></tr><tr><td>3</td><td>0.026</td><td>2.652</td><td>0.576</td><td>9.45%</td><td>96.84%</td><td>37.43%</td></tr><tr><td>4</td><td>0.047</td><td>2.68</td><td>0.583</td><td>9.71%</td><td>93.74%</td><td>38.91%</td></tr></table>",
760
+ "bbox": [
761
+ 80,
762
+ 372,
763
+ 480,
764
+ 446
765
+ ],
766
+ "page_idx": 4
767
+ },
768
+ {
769
+ "type": "table",
770
+ "img_path": "images/9e22bb702c1f88d10c64627b220cccfb2035be35e662f2453a0fdff418ebc7e7.jpg",
771
+ "table_caption": [
772
+ "TABLE III: Resulting RMSE and MAPE for global models in the considered scenarios for the 20 non-participant clients"
773
+ ],
774
+ "table_footnote": [],
775
+ "table_body": "<table><tr><td></td><td colspan=\"3\">RMSE</td><td colspan=\"3\">MAPE</td></tr><tr><td>Scenario</td><td>Min</td><td>Max</td><td>Mean</td><td>Min</td><td>Max</td><td>Mean</td></tr><tr><td>1</td><td>0.262</td><td>1.024</td><td>0.589</td><td>15.82%</td><td>60.72%</td><td>44.98%</td></tr><tr><td>2</td><td>0.241</td><td>0.979</td><td>0.550</td><td>16.08%</td><td>55.34%</td><td>40.95%</td></tr><tr><td>3</td><td>0.229</td><td>0.99</td><td>0.530</td><td>15.78%</td><td>53.98%</td><td>39.18%</td></tr><tr><td>4</td><td>0.235</td><td>1.004</td><td>0.543</td><td>16.04%</td><td>56.61%</td><td>41.15%</td></tr></table>",
776
+ "bbox": [
777
+ 80,
778
+ 503,
779
+ 480,
780
+ 578
781
+ ],
782
+ "page_idx": 4
783
+ },
784
+ {
785
+ "type": "text",
786
+ "text": "3) Behaviour of personalization:",
787
+ "text_level": 1,
788
+ "bbox": [
789
+ 89,
790
+ 590,
791
+ 315,
792
+ 604
793
+ ],
794
+ "page_idx": 4
795
+ },
796
+ {
797
+ "type": "text",
798
+ "text": "In this section, we study the effect of personalization on the performance of the models. First we test if re-training the model locally for the participant clients gives better results. Then we apply the same thing to the set of clients who did not participate in the training. The models were retrained for 5 epochs for each client. Results for the set of clients participating in the training are summarized in Table IV and for the non-participating clients in Table V. We notice an overall improvement of most of the models. For example, the model 1 has an overall improvement of $5.07\\%$ in terms of MAPE for the participating set of clients and of $4.78\\%$ on the non-participating clients set. However, for some clients, the performance can not be improved despite retraining, and this, as we mentioned earlier, is related to the quality of historical data points. Applying the models to these clients' consumption profiles results in very high MAPE, which affects the average results. These clients should be treated as outliers, nonetheless, this is beyond the scope of this study.",
799
+ "bbox": [
800
+ 73,
801
+ 604,
802
+ 490,
803
+ 876
804
+ ],
805
+ "page_idx": 4
806
+ },
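
Personalization as described here is a few epochs of local fine-tuning on one client's data. The sketch below assumes a Keras model and copies the global weights before retraining; `personalize` is an illustrative name, not the paper's code.

```python
import tensorflow as tf

def personalize(global_model, x_client, y_client, epochs=5):
    """Fine-tune a copy of the global model on a single client's local data."""
    local = tf.keras.models.clone_model(global_model)
    local.set_weights(global_model.get_weights())  # start from the global weights
    local.compile(loss="mean_squared_error", optimizer="adam")
    local.fit(x_client, y_client, epochs=epochs, verbose=0)
    return local
```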
807
+ {
808
+ "type": "text",
809
+ "text": "To illustrate the improvements on predictions using personalization, we randomly selected a client from the participant",
810
+ "bbox": [
811
+ 73,
812
+ 877,
813
+ 488,
814
+ 907
815
+ ],
816
+ "page_idx": 4
817
+ },
818
+ {
819
+ "type": "table",
820
+ "img_path": "images/d2fd12553fe6ea23820d557ddd74578348c1de89f53e6d9dadfc556d127899a7.jpg",
821
+ "table_caption": [
822
+ "TABLE IV: Resulting RMSE and MAPE after personalization over 180 clients"
823
+ ],
824
+ "table_footnote": [],
825
+ "table_body": "<table><tr><td></td><td colspan=\"3\">RMSE</td><td colspan=\"3\">MAPE</td></tr><tr><td>Scenario</td><td>Min</td><td>Max</td><td>Mean</td><td>Min</td><td>Max</td><td>Mean</td></tr><tr><td>1</td><td>0.0</td><td>2.47</td><td>0.550</td><td>8.13%</td><td>99.16%</td><td>36.33%</td></tr><tr><td>2</td><td>0.0</td><td>2.47</td><td>0.551</td><td>7.89%</td><td>91.23%</td><td>36.39%</td></tr><tr><td>3</td><td>0.0</td><td>2.371</td><td>0.536</td><td>7.64%</td><td>88.76%</td><td>34.27%</td></tr><tr><td>4</td><td>0.0</td><td>2.375</td><td>0.536</td><td>8.00%</td><td>82.14%</td><td>34.14%</td></tr></table>",
826
+ "bbox": [
827
+ 519,
828
+ 93,
829
+ 903,
830
+ 167
831
+ ],
832
+ "page_idx": 4
833
+ },
834
+ {
835
+ "type": "table",
836
+ "img_path": "images/45839514af8ce9ddf1afb5841139dddae1ab1bf3721aad92f1f69b5134ec001b.jpg",
837
+ "table_caption": [
838
+ "TABLE V: Resulting RMSE and MAPE after personalization for 20 non-participating clients"
839
+ ],
840
+ "table_footnote": [],
841
+ "table_body": "<table><tr><td></td><td colspan=\"3\">RMSE</td><td colspan=\"3\">MAPE</td></tr><tr><td>Scenario</td><td>Min</td><td>Max</td><td>Mean</td><td>Min</td><td>Max</td><td>Mean</td></tr><tr><td>1</td><td>0.232</td><td>0.905</td><td>0.516</td><td>18.35%</td><td>53.70%</td><td>40.20%</td></tr><tr><td>2</td><td>0.233</td><td>0.901</td><td>0.516</td><td>16.99%</td><td>58.68%</td><td>40.71%</td></tr><tr><td>3</td><td>0.235</td><td>0.909</td><td>0.516</td><td>15.79%</td><td>54.82%</td><td>39.49%</td></tr><tr><td>4</td><td>0.232</td><td>0.907</td><td>0.509</td><td>15.96%</td><td>52.96%</td><td>39.01%</td></tr></table>",
842
+ "bbox": [
843
+ 513,
844
+ 215,
845
+ 911,
846
+ 289
847
+ ],
848
+ "page_idx": 4
849
+ },
850
+ {
851
+ "type": "text",
852
+ "text": "set (client 4313) and a client from the non-participant set (client 8467). We applied the global model 4 and the corresponding personalized models. The actual load profiles and the predicted profiles are shown in Fig.3 and Fig.4. Both models fit the overall behaviour of the consumption profiles.",
853
+ "bbox": [
854
+ 504,
855
+ 316,
856
+ 919,
857
+ 391
858
+ ],
859
+ "page_idx": 4
860
+ },
861
+ {
862
+ "type": "text",
863
+ "text": "We conclude that we can indeed train powerful models for a population's consumption profiles using only a subset of the users forming it. For applications that have high accuracy requirements, the model can be retrained resulting in a personalized model that follows the profile's curves better, yielding more accurate predictions. Nonetheless, the predictions obtained with the global model can be a good starting point for new clients who don't have enough data for personalization.",
864
+ "bbox": [
865
+ 504,
866
+ 392,
867
+ 921,
868
+ 526
869
+ ],
870
+ "page_idx": 4
871
+ },
872
+ {
873
+ "type": "text",
874
+ "text": "4) Gain in network load:",
875
+ "text_level": 1,
876
+ "bbox": [
877
+ 509,
878
+ 527,
879
+ 686,
880
+ 541
881
+ ],
882
+ "page_idx": 4
883
+ },
884
+ {
885
+ "type": "text",
886
+ "text": "To illustrate the gain in the network load, we can consider the most basic case where the distance between all the clients and the MEC server is 1-Hop. The size of the model is $1,9\\mathrm{Kb}$ and the size of the used data is $16\\mathrm{Mb}$ . Using Eq.3, the gain in the scenarios 1 and 3 is $97\\%$ , while scenarios 2 and 4 result in a gain of $90\\%$ . This is a significant gain, especially when considering that the approach could be applied at the scale of a city or bigger, for example.",
887
+ "bbox": [
888
+ 504,
889
+ 542,
890
+ 921,
891
+ 664
892
+ ],
893
+ "page_idx": 4
894
+ },
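
A worked check of Eq. 3 under the stated one-hop case. The reported 97% and 90% gains are reproduced if each selected client both downloads and uploads the 1.9 Kb model in every round, with the 16 Mb of data taken as the total that centralized training would transfer; the bidirectional counting is our assumption, since Eq. 2 as written counts only one direction.

```python
MODEL_KB = 1.9      # model size, from the paper
DATA_KB = 16_000    # 16 Mb of training data, from the paper
ROUNDS = 20

def gain(clients_per_round, hops=1, directions=2):
    """Eq. 3: G_s = 1 - L_sF / L_sC, with model exchanges counted both ways."""
    l_fl = MODEL_KB * directions * ROUNDS * clients_per_round * hops
    return 1.0 - l_fl / DATA_KB

print(f"K=5 : {gain(5):.1%}")   # ~97.6%, matching the reported 97%
print(f"K=20: {gain(20):.1%}")  # ~90.5%, matching the reported 90%
```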
895
+ {
896
+ "type": "text",
897
+ "text": "V. REMARKS & FUTURE WORK",
898
+ "text_level": 1,
899
+ "bbox": [
900
+ 601,
901
+ 676,
902
+ 823,
903
+ 690
904
+ ],
905
+ "page_idx": 4
906
+ },
907
+ {
908
+ "type": "text",
909
+ "text": "The feasibility of the proposed approach is dependent on the capabilities of the edge devices to perform local training. New IoT devices have sufficient computing hardware to run complex machine learning models, but training a neural network is very likely to compromise device performance. However, some lightweight machine learning frameworks have emerged such as Tensorflow Lite<sup>1</sup> which provides solid ground for future implementations.",
910
+ "bbox": [
911
+ 504,
912
+ 696,
913
+ 921,
914
+ 818
915
+ ],
916
+ "page_idx": 4
917
+ },
918
+ {
919
+ "type": "text",
920
+ "text": "The accuracy of the models, even after personalization, still varies depending on the user. To improve the results, neural networks should be coupled with other methods, such as a prior clustering of clients using criteria other than the",
921
+ "bbox": [
922
+ 504,
923
+ 819,
924
+ 921,
925
+ 880
926
+ ],
927
+ "page_idx": 4
928
+ },
929
+ {
930
+ "type": "page_footnote",
931
+ "text": "1 https://www.tensorflow.org/lite",
932
+ "bbox": [
933
+ 519,
934
+ 893,
935
+ 691,
936
+ 907
937
+ ],
938
+ "page_idx": 4
939
+ },
940
+ {
941
+ "type": "image",
942
+ "img_path": "images/09b144c5c1759917b510347fc989b41178d1e6bebbcc02d1108a6fc55c0e3192.jpg",
943
+ "image_caption": [
944
+ "Fig. 3: Predictions for next hour consumption for client 4313 who participated in training the global model 4. Local training for 5 epochs reduced RMSE from $0.55\\mathrm{kW}$ to $0.388\\mathrm{kW}$ ."
945
+ ],
946
+ "image_footnote": [],
947
+ "bbox": [
948
+ 86,
949
+ 63,
950
+ 503,
951
+ 250
952
+ ],
953
+ "page_idx": 5
954
+ },
955
+ {
956
+ "type": "image",
957
+ "img_path": "images/d9650ebbb10eabb5c26a8289797771035f397b0aed78f13b1176f493cc0971ee.jpg",
958
+ "image_caption": [
959
+ "Fig. 4: Predictions for next hour consumption for client 8467 who did not participate in training the global model 4. Local training for 5 epochs reduced RMSE from $0.8\\mathrm{kW}$ to $0.72\\mathrm{kW}$ ."
960
+ ],
961
+ "image_footnote": [],
962
+ "bbox": [
963
+ 101,
964
+ 316,
965
+ 465,
966
+ 503
967
+ ],
968
+ "page_idx": 5
969
+ },
970
+ {
971
+ "type": "text",
972
+ "text": "geographical proximity. Solving the problem of outliers in this context should also be investigated.",
973
+ "bbox": [
974
+ 73,
975
+ 585,
976
+ 488,
977
+ 617
978
+ ],
979
+ "page_idx": 5
980
+ },
981
+ {
982
+ "type": "text",
983
+ "text": "VI. CONCLUSION",
984
+ "text_level": 1,
985
+ "bbox": [
986
+ 217,
987
+ 637,
988
+ 346,
989
+ 651
990
+ ],
991
+ "page_idx": 5
992
+ },
993
+ {
994
+ "type": "text",
995
+ "text": "Individual short-term load forecasting is a challenging task considering the stochastic nature of consumption profiles. In this paper, we proposed a system model using Edge computing and federated learning to tackle privacy and data diversity challenges related to short-term load forecasting in the smart grid. To the best of our knowledge, this represents one of the first studies of federated learning in the smart grid context. Unlike centralized methods, in the proposed system federated learning uses edge devices to train models, hence reducing security risks to the ones related to the device only. We conducted experiments to evaluate the performance of both centralized and personalized models in federated settings. The simulations results show that it is a promising approach to create highly performing models with a significantly reduced networking load compared to a centralised model, while preserving the privacy of consumption data.",
996
+ "bbox": [
997
+ 73,
998
+ 665,
999
+ 488,
1000
+ 907
1001
+ ],
1002
+ "page_idx": 5
1003
+ },
1004
+ {
1005
+ "type": "text",
1006
+ "text": "ACKNOWLEDGEMENT",
1007
+ "text_level": 1,
1008
+ "bbox": [
1009
+ 620,
1010
+ 61,
1011
+ 807,
1012
+ 75
1013
+ ],
1014
+ "page_idx": 5
1015
+ },
1016
+ {
1017
+ "type": "text",
1018
+ "text": "The authors would like to thank the Natural Sciences and Engineering Research Council of Canada, for the financial support of this research.",
1019
+ "bbox": [
1020
+ 506,
1021
+ 89,
1022
+ 921,
1023
+ 135
1024
+ ],
1025
+ "page_idx": 5
1026
+ },
1027
+ {
1028
+ "type": "text",
1029
+ "text": "REFERENCES",
1030
+ "text_level": 1,
1031
+ "bbox": [
1032
+ 665,
1033
+ 156,
1034
+ 761,
1035
+ 169
1036
+ ],
1037
+ "page_idx": 5
1038
+ },
1039
+ {
1040
+ "type": "list",
1041
+ "sub_type": "ref_text",
1042
+ "list_items": [
1043
+ "[1] Elena Mocanu, Phuong H. Nguyen, Madeleine Gibescu, and Wil L. Kling. Deep learning for estimating building energy consumption. Sustainable Energy, Grids and Networks, 6:91-99, June 2016.",
1044
+ "[2] W. Kong, Z. Y. Dong, D. J. Hill, F. Luo, and Y. Xu. Short-Term Residential Load Forecasting Based on Resident Behaviour Learning. IEEE Transactions on Power Systems, 33(1):1087-1088, January 2018.",
1045
+ "[3] Dhaou Said et al. Advanced scheduling protocol for electric vehicle home charging with time-of-use pricing. pages 6272-6276, June 2013. ISSN: 1938-1883.",
1046
+ "[4] A. Almalaq and J. J. Zhang. Evolutionary Deep Learning-Based Energy Consumption Prediction for Buildings. IEEE Access, 7:1520-1531, 2019.",
1047
+ "[5] Salah Bouktif, Ali Fiaz, Ali Ouni, and Mohamed Adel Serhani. Optimal Deep Learning LSTM Model for Electric Load Forecasting using Feature Selection and Genetic Algorithm: Comparison with Machine Learning Approaches †. Energies, 11(7):1636, July 2018.",
1048
+ "[6] Abderrahime Filali et al. Prediction-Based Switch Migration Scheduling for SDN Load Balancing. In ICC 2019 - 2019 IEEE International Conference on Communications (ICC), pages 1-6, May 2019. ISSN: 1938-1883.",
1049
+ "[7] Yanbo Huang. Advances in Artificial Neural Networks - Methodological Development and Application. Algorithms, 2(3):973-1007, September 2009.",
1050
+ "[8] B. Stephen, X. Tang, P. R. Harvey, S. Galloway, and K. I. Jennett. Incorporating Practice Theory in Sub-Profile Models for Short Term Aggregated Residential Load Forecasting. IEEE Transactions on Smart Grid, 8(4):1591-1598, July 2017.",
1051
+ "[9] H. Shi, M. Xu, and R. Li. Deep Learning for Household Load Forecasting—A Novel Pooling Deep RNN. IEEE Transactions on Smart Grid, 9(5):5271-5280, September 2018.",
1052
+ "[10] H. Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. Communication-efficient learning of deep networks from decentralized data.",
1053
+ "[11] S. Hochreiter and J. Schmidhuber. Long short-term memory. *Neural Computation*, 9(8):1735-1780, November 1997.",
1054
+ "[12] Salah Bouktif, Ali Fiaz, Ali Ouni, and Mohamed Adel Serhani. Optimal Deep Learning LSTM Model for Electric Load Forecasting using Feature Selection and Genetic Algorithm: Comparison with Machine Learning Approaches †. Energies, 11(7):1636, July 2018.",
1055
+ "[13] Jian Zheng, Cencen Xu, Ziang Zhang, and Xiaohua Li. Electric load forecasting in smart grids using Long-Short-Term-Memory based Recurrent Neural Network. In 2017 51st Annual Conference on Information Sciences and Systems (CISS), pages 1-6, March 2017.",
1056
+ "[14] D. L. Marino, K. Amarasinghe, and M. Manic. Building energy load forecasting using Deep Neural Networks. In *IECON 2016 - 42nd Annual Conference of the IEEE Industrial Electronics Society*, pages 7046-7051, October 2016.",
1057
+ "[15] Guangya Zhu, Tin-Tai Chow, and Norman Tse. Short-term load forecasting coupled with weather profile generation methodology. Building Services Engineering Research and Technology, 39(3):310-327, May 2018.",
1058
+ "[16] Dhaou Said et al. Scheduling protocol with load management for EV charging. In 2014 IEEE Global Communications Conference, pages 362-367, December 2014. ISSN: 1930-529X.",
1059
+ "[17] Jihene Rezgui et al. Smart charge scheduling for EVs based on two-way communication. In 2017 IEEE International Conference on Communications (ICC), pages 1-6, May 2017. ISSN: 1938-1883.",
1060
+ "[18] P. Kumar, Y. Lin, G. Bai, A. Paverd, J. S. Dong, and A. Martin. Smart Grid Metering Networks: A Survey on Security, Privacy and Open Research Issues. IEEE Communications Surveys Tutorials, pages 1-1, 2019.",
1061
+ "[19] M. Badra and S. Zeadally. Design and Performance Analysis of a Virtual Ring Architecture for Smart Grid Privacy. IEEE Transactions on Information Forensics and Security, 9(2):321-329, February 2014."
1062
+ ],
1063
+ "bbox": [
1064
+ 509,
1065
+ 181,
1066
+ 921,
1067
+ 906
1068
+ ],
1069
+ "page_idx": 5
1070
+ },
1071
+ {
1072
+ "type": "list",
1073
+ "sub_type": "ref_text",
1074
+ "list_items": [
1075
+ "[20] Y. Gong, Y. Cai, Y. Guo, and Y. Fang. A Privacy-Preserving Scheme for Incentive-Based Demand Response in the Smart Grid. IEEE Transactions on Smart Grid, 7(3):1304-1313, May 2016.",
1076
+ "[21] H. Park, H. Kim, K. Chun, J. Lee, S. Lim, and I. Yie. Untraceability of Group Signature Schemes based on Bilinear Mapping and Their Improvement. In Fourth International Conference on Information Technology (ITNG'07), pages 747-753, April 2007.",
1077
+ "[22] Quoc-Viet Pham, Fang Fang, Vu Nguyen Ha, Mai Le, Zhiguo Ding, Long Bao Le, and Won-Joo Hwang. A Survey of Multi-Access Edge Computing in 5g and Beyond: Fundamentals, Technology Integration, and State-of-the-Art. arXiv:1906.08452 [cs, math], June 2019. arXiv: 1906.08452.",
1078
+ "[23] Andrew Hard, Kanishka Rao, Rajiv Mathews, Swaroop Ramaswamy, Françoise Beaufays, Sean Augenstein, Hubert Eichner, Chloe Kiddon, and Daniel Ramage. Federated learning for mobile keyboard prediction.",
1079
+ "[24] Khe Chai Sim, Petr Zadrazil, and Françoise Beaufays. An Investigation into On-Device Personalization of End-to-End Automatic Speech Recognition Models. In Interspeech 2019, pages 774-778. ISCA, September 2019.",
1080
+ "[25] Keith Bonawitz, Vladimir Ivanov, Ben Kreuter, Antonio Marcedone, H. Brendan McMahan, Sarvar Patel, Daniel Ramage, Aaron Segal, and Karn Seth. Practical Secure Aggregation for Federated Learning on User-Held Data. arXiv:1611.04482 [cs, stat], November 2016. arXiv: 1611.04482.",
1081
+ "[26] Robin C. Geyer, Tassilo Klein, and Moin Nabi. Differentially Private Federated Learning: A Client Level Perspective. arXiv:1712.07557 [cs, stat], December 2017. arXiv: 1712.07557.",
1082
+ "[27] Pecan street inc. dataport 2019 [online] https://dataport.pecanstreet.org/.",
1083
+ "[28] W. Kong, Z. Y. Dong, Y. Jia, D. J. Hill, Y. Xu, and Y. Zhang. ShortTerm Residential Load Forecasting Based on LSTM Recurrent Neural Network. IEEE Transactions on Smart Grid, 10(1):841-851, January 2019.",
1084
+ "[29] Matthew Rowe, Timur Yunusov, Stephen Haben, William Holderbaum, and Ben Potter. The Real-Time Optimisation of DNO Owned Storage Devices on the LV Network for Peak Reduction. Energies, 7(6):3537-3560, June 2014."
1085
+ ],
1086
+ "bbox": [
1087
+ 76,
1088
+ 63,
1089
+ 488,
1090
+ 470
1091
+ ],
1092
+ "page_idx": 6
1093
+ }
1094
+ ]
2201.11xxx/2201.11248/a74eab91-73ff-44e2-9768-88fdde72bd1e_model.json ADDED
@@ -0,0 +1,1435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.247,
8
+ 0.058,
9
+ 0.683
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2201.11248v1 [cs.CE] 27 Jan 2022"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.082,
18
+ 0.064,
19
+ 0.917,
20
+ 0.132
21
+ ],
22
+ "angle": 0,
23
+ "content": "Electrical Load Forecasting Using Edge Computing and Federated Learning"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.369,
29
+ 0.153,
30
+ 0.63,
31
+ 0.168
32
+ ],
33
+ "angle": 0,
34
+ "content": "Afaf Taik and Soumaya Cherkaoui"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.268,
40
+ 0.169,
41
+ 0.735,
42
+ 0.184
43
+ ],
44
+ "angle": 0,
45
+ "content": "INTERLAB, Engineering Faculty, Université de Sherbrooke, Canada."
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.336,
51
+ 0.185,
52
+ 0.658,
53
+ 0.199
54
+ ],
55
+ "angle": 0,
56
+ "content": "{afaf.taik, soumaya.cherkaoui} \\(@\\) usherbrooke.ca"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.074,
62
+ 0.23,
63
+ 0.49,
64
+ 0.456
65
+ ],
66
+ "angle": 0,
67
+ "content": "Abstract-In the smart grid, huge amounts of consumption data are used to train deep learning models for applications such as load monitoring and demand response. However, these applications raise concerns regarding security and have high accuracy requirements. In one hand, the data used is privacy-sensitive. For instance, the fine-grained data collected by a smart meter at a consumer's home may reveal information on the appliances and thus the consumer's behaviour at home. On the other hand, the deep learning models require big data volumes with enough variety and to be trained adequately. In this paper, we evaluate the use of Edge computing and federated learning, a decentralized machine learning scheme that allows to increase the volume and diversity of data used to train the deep learning models without compromising privacy. This paper reports, to the best of our knowledge, the first use of federated learning for household load forecasting and achieves promising results. The simulations were done using Tensorflow Federated on the data from 200 houses from Texas, USA."
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.075,
73
+ 0.457,
74
+ 0.489,
75
+ 0.482
76
+ ],
77
+ "angle": 0,
78
+ "content": "Keywords—Federated Learning; Energy Load Forecasting; Edge Computing; Deep Neural Networks; LSTM; Smart Grid."
79
+ },
80
+ {
81
+ "type": "title",
82
+ "bbox": [
83
+ 0.216,
84
+ 0.496,
85
+ 0.348,
86
+ 0.509
87
+ ],
88
+ "angle": 0,
89
+ "content": "I. INTRODUCTION"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.074,
95
+ 0.515,
96
+ 0.49,
97
+ 0.711
98
+ ],
99
+ "angle": 0,
100
+ "content": "Load forecasting is an essential part of the development of the smart grid. Long-term load forecasting is deemed necessary for infrastructure planning, while mid-term and short-term load forecasting are key tasks in system operations [1]. Day-to-day operational efficiency of electrical power delivery, in particular, requires an accurate prediction of short-term load profiles, which is based on collecting and analysing large volumes of high-resolution data from households. However, individual short-term load forecasting (STLF) has been proven to be a challenging task because of profile volatility. In fact, the electrical load of a house has a high correlation to its residents' behaviour, which is too stochastic and often hard to predict [2, 3]."
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.074,
106
+ 0.712,
107
+ 0.49,
108
+ 0.909
109
+ ],
110
+ "angle": 0,
111
+ "content": "Benchmarks for state-of-the-art methods [4, 5] have found that deep neural networks are a promising solution for the STLF problem at the household level, due to their ability to capture complex and non-linear patterns. Neural networks outperform other prediction methods such as Auto Regressive Integrated Moving Average (ARIMA)[6] and Support Vector Regression (SVR). Nevertheless, applying deep learning models alone will not lead to significant improvements, as models tend to suffer from overfitting [7]. An overfitted model is a model that learned the details of the training data including the noise, which affects its ability to generalize when applied to new data. To tackle this issue, it is recommended to increase the diversity and size of the used data by combining usage"
112
+ },
113
+ {
114
+ "type": "image",
115
+ "bbox": [
116
+ 0.536,
117
+ 0.233,
118
+ 0.887,
119
+ 0.37
120
+ ],
121
+ "angle": 0,
122
+ "content": null
123
+ },
124
+ {
125
+ "type": "image_caption",
126
+ "bbox": [
127
+ 0.506,
128
+ 0.386,
129
+ 0.921,
130
+ 0.411
131
+ ],
132
+ "angle": 0,
133
+ "content": "Fig. 1: Iterative communications between clients and server in Federated Learning"
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.505,
139
+ 0.452,
140
+ 0.923,
141
+ 0.588
142
+ ],
143
+ "angle": 0,
144
+ "content": "records from different households. Typically, proposed frameworks [8, 9] assume that all data records are transferred from smart meters to a centralized computational infrastructure through broadband networks to train models. Nevertheless, this assumption raises concerns related to privacy, since the load profiles reveal a lot of sensitive information, such as device usage and the household's occupancy. Sending such detailed data over networks makes it exposed to malicious interception and misuse."
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.505,
150
+ 0.589,
151
+ 0.923,
152
+ 0.785
153
+ ],
154
+ "angle": 0,
155
+ "content": "To address privacy concerns while still increasing data records' volume and variety, a new on-device solution was recently proposed by the Machine Learning community: Federated Learning (FL) [10]. Federated Learning is a decentralized machine learning scheme, where each device participates in training a central model without sending any data. As illustrated in Fig.1, the server first initializes the model either arbitrarily or by using publicly available data. Then, the model is sent to a set of randomly selected devices (clients) for local training using their data. Each client sends to the server an update of the model's weights, which will be averaged and used to update the global model. This process will be repeated until the global model stabilizes."
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.505,
161
+ 0.787,
162
+ 0.923,
163
+ 0.908
164
+ ],
165
+ "angle": 0,
166
+ "content": "The main purpose of this paper is to evaluate the use of Edge computing, together with the Federated Learning approach in the STLF challenge for electricity in households. Edge computing refers to data processing at the edge of a network as opposed to cloud or remote server processing. We use Long-short Term Memory (LSTM) [11], a deep neural network for forecasting time series, which uses previous observations of the house's electrical load to predict future"
167
+ }
168
+ ],
169
+ [
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.074,
174
+ 0.063,
175
+ 0.492,
176
+ 0.29
177
+ ],
178
+ "angle": 0,
179
+ "content": "ones. We study a group of houses that have similar properties (geographical location, type of building), on a short period of time to avoid the weather's fluctuations and seasonality impact. Federated learning is performed on houses grid Edge equipment. Edge equipment is usually present at the end of the electrical distribution system as a smart interface between the customer and the electric power supply, be it a smart meter or a more sophisticated equipment. Our contributions in this work can be summarized as follows: (1) We propose an enabling architecture for FL using Edge equipment in the smart grid; (2) We evaluate the potential gain of FL in terms of accuracy through simulations; and (3) we evaluate the potential network load gain through numerical results. To these contributions, we add the gain in privacy leveraged by decentralization and Edge computing."
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.075,
185
+ 0.29,
186
+ 0.491,
187
+ 0.382
188
+ ],
189
+ "angle": 0,
190
+ "content": "The remainder of this paper is structured as follows: Section II discusses related works focusing on load prediction and privacy. In Section III, we define the proposed approach and used methods. Section IV introduces the simulations and numerical results. Then in Section V we discuss the limitations and future work. Section VI concludes the paper."
191
+ },
192
+ {
193
+ "type": "title",
194
+ "bbox": [
195
+ 0.211,
196
+ 0.398,
197
+ 0.355,
198
+ 0.412
199
+ ],
200
+ "angle": 0,
201
+ "content": "II. RELATED WORK"
202
+ },
203
+ {
204
+ "type": "text",
205
+ "bbox": [
206
+ 0.074,
207
+ 0.423,
208
+ 0.491,
209
+ 0.665
210
+ ],
211
+ "angle": 0,
212
+ "content": "Many recent research works used deep neural networks, and particularly Long-short term memory (LSTM) to tackle the short-term load forecasting challenge. In fact, benchmarks have proved LSTM's potential compared to other methods[12, 13], yet the results do not match the level of desired exactitude in terms of Root Mean Square Error (RMSE) and Mean Average Percentage Error (MAPE). In order to improve forecasting accuracy, authors in [14] propose to use a variant of LSTM that is a sequence-to-sequence LSTM, which gives better results for one-minute resolution data, but no significant improvement for the one-hour resolution compared to standard LSTM. Furthermore, other authors [4] consider the problem of finding the best LSTM network to be a hyperparameter tuning problem, and use the genetic algorithm to this end. They state that finding the best combination of window size and number of hidden neurons in each layer remains a probabilistic task."
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.074,
218
+ 0.666,
219
+ 0.49,
220
+ 0.832
221
+ ],
222
+ "angle": 0,
223
+ "content": "Some other works see that the problem is not simply an neural network architecture problem, and that ability of generalization of data-driven forecasting models is the real issue. In fact, many of the proposed models' accuracy drops when they are applied to new datasets [5]. Some works suggest to use complementary data about the weather [15] or records from the appliances [2]. While the weather has a real impact on the aggregated electrical consumption, the individual short-term load is more related to the occupants' behaviour[3, 16, 17]. However, collecting data from appliances around each house is an expensive and privacy-intrusive task."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.074,
229
+ 0.832,
230
+ 0.492,
231
+ 0.909
232
+ ],
233
+ "angle": 0,
234
+ "content": "Another approach to enrich the training data is grouping data from several customers. Authors in [8] use clustering to group users with similar profiles, hence reducing the variance of uncertainty within groups. Authors in [9] propose a pooling technique that increases data's diversity to overcome the"
235
+ },
236
+ {
237
+ "type": "image",
238
+ "bbox": [
239
+ 0.513,
240
+ 0.062,
241
+ 0.93,
242
+ 0.212
243
+ ],
244
+ "angle": 0,
245
+ "content": null
246
+ },
247
+ {
248
+ "type": "image_caption",
249
+ "bbox": [
250
+ 0.607,
251
+ 0.215,
252
+ 0.822,
253
+ 0.228
254
+ ],
255
+ "angle": 0,
256
+ "content": "Fig. 2: Network components and roles"
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.505,
262
+ 0.266,
263
+ 0.921,
264
+ 0.296
265
+ ],
266
+ "angle": 0,
267
+ "content": "overfitting problem. Nonetheless, these methods are heavily centralized and are prone to privacy-issues."
268
+ },
269
+ {
270
+ "type": "text",
271
+ "bbox": [
272
+ 0.505,
273
+ 0.297,
274
+ 0.922,
275
+ 0.463
276
+ ],
277
+ "angle": 0,
278
+ "content": "Fine-grained consumption data sent over networks is subject to many privacy threats when leaked through unauthorized interception or eavesdropping [18]. Many efforts were conducted to protect the users' identities in the smart grid. For instance, authors in [19] propose a clustering-based method where each group of users who are geographically close receive a common serial number. However this method makes it hard to treat each client individually because of the anonymity. Other works' focus is masking the consumption data, where data aggregation is the most popular method [20, 21], but it goes in opposite directions with STLF requirements."
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.505,
284
+ 0.463,
285
+ 0.922,
286
+ 0.584
287
+ ],
288
+ "angle": 0,
289
+ "content": "In regards to user privacy and prediction accuracy, none of the aforementioned papers address both of these aspects. In the proposed work, we suggest to use the Edge Equipment that compose the Home Area Network (HAN) to carry out operations related to client selection and training neural network at the Edge following the federated learning scheme, allowing the use of data to train a global model without compromising the resident's privacy."
290
+ },
291
+ {
292
+ "type": "title",
293
+ "bbox": [
294
+ 0.638,
295
+ 0.596,
296
+ 0.792,
297
+ 0.61
298
+ ],
299
+ "angle": 0,
300
+ "content": "III. SYSTEM MODEL"
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.505,
306
+ 0.618,
307
+ 0.922,
308
+ 0.77
309
+ ],
310
+ "angle": 0,
311
+ "content": "We propose the network architecture shown in Fig.2 with two main components: a Multi-access Edge Computing (MEC) server [22] and clients. Clients are houses with Edge equipment which is essentially composed of smart-meters and other devices in the HAN. FL is used to build a global LSTM-based model for STLF. The training rounds are orchestrated by the MEC server and executed by the clients using their own electrical consumption data. In this section, we explain in detail LSTM and how it comes to use in the forecasting, as well as FL and how it is used in our system model."
312
+ },
313
+ {
314
+ "type": "title",
315
+ "bbox": [
316
+ 0.506,
317
+ 0.782,
318
+ 0.776,
319
+ 0.797
320
+ ],
321
+ "angle": 0,
322
+ "content": "A. Time series forecasting using LSTM"
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.505,
328
+ 0.802,
329
+ 0.922,
330
+ 0.908
331
+ ],
332
+ "angle": 0,
333
+ "content": "The prediction of the future electrical load in this work is achieved through the time series forecasting approach with LSTM. A time series refers to an ordered sequence of equally-spaced data points that represent the evolution of a specific variable over time. Time series forecasting is enabled through modeling the dependencies between the points of current data points and historical data, but the accuracy of the predictions"
334
+ }
335
+ ],
336
+ [
337
+ {
338
+ "type": "text",
339
+ "bbox": [
340
+ 0.074,
341
+ 0.062,
342
+ 0.492,
343
+ 0.092
344
+ ],
345
+ "angle": 0,
346
+ "content": "relies heavily on the chosen model and the quality of historical data points."
347
+ },
348
+ {
349
+ "type": "text",
350
+ "bbox": [
351
+ 0.074,
352
+ 0.092,
353
+ 0.493,
354
+ 0.44
355
+ ],
356
+ "angle": 0,
357
+ "content": "LSTM is a recurrent neural network (RNN) that is fundamentally different from traditional feedforward Neural networks, and more efficient than standard RNNs. Sequence learning is LSTM's Forte. It is able to establish the temporal correlations between previous data points and the current circumstances, while solving vanishing and exploding gradient problems that are common in RNNs. Gradient vanishing means that the norm of the gradient for long-term components gets smaller causing weights to never change at lower layers, while the gradient exploding refers to the opposite event [11]. This is achieved through its key components: the memory cell that is used to remember important states in the past, and the gates that regulate the flow of information. LSTM has three gates: the input gate, the output gate and the forget gate. They learn to reset the memory cell for unimportant features during the learning process. Almost all state of the art results in sequence learning are achieved with LSTM and its variants especially language translation and speech recognition. In the case of residential STLF, it is expected that the LSTM network would be able to form an abstraction of some residents' states from the provided consumption profile, maintain the memory of the states, and make a forecast of the future consumption based on the learnt information."
358
+ },
359
+ {
360
+ "type": "title",
361
+ "bbox": [
362
+ 0.075,
363
+ 0.45,
364
+ 0.236,
365
+ 0.466
366
+ ],
367
+ "angle": 0,
368
+ "content": "B. Federated Learning"
369
+ },
370
+ {
371
+ "type": "text",
372
+ "bbox": [
373
+ 0.074,
374
+ 0.469,
375
+ 0.491,
376
+ 0.65
377
+ ],
378
+ "angle": 0,
379
+ "content": "Federated learning is a form of machine learning where most of the training process is done in a distributed way among devices referred to as clients. It was first proposed and implemented by Google on keyboards of mobile devices for next word prediction [23]. This approach is ideal for many cases: 1) When data is privacy sensitive, 2) when data is large in size compared to model updates, 3) highly distributed systems where the number of devices is orders of magnitude larger than nodes in a data center, 4) in supervised training when labels can be inferred directly from the user. Federated learning has also proven to be very useful when datasets are unbalanced or non-identically distributed."
380
+ },
381
+ {
382
+ "type": "text",
383
+ "bbox": [
384
+ 0.074,
385
+ 0.651,
386
+ 0.49,
387
+ 0.801
388
+ ],
389
+ "angle": 0,
390
+ "content": "An iteration of federated learning goes as follows: First, a subset of clients is chosen and each of them receives the current model. In our case, clients are hosted at Edge equipment in houses (e.g. smart meters). Clients that were selected compute Stochastic Gradient Descent (SGD) updates on locally-stored data, then a server aggregates the client updates to build a new global model. The new model is sent back to another subset of clients. This process is repeated until the desired prediction accuracy is reached. The operations are detailed in Algorithm 1."
391
+ },
392
+ {
393
+ "type": "text",
394
+ "bbox": [
395
+ 0.074,
396
+ 0.802,
397
+ 0.492,
398
+ 0.909
399
+ ],
400
+ "angle": 0,
401
+ "content": "In order to combine the client updates, the server uses the FederatedAveraging algorithm [10]. First, the initial global model is initialized randomly or is pre-trained using publicly available data. In each training round \\( r \\), the server sends a global model \\( w_{r} \\) to a subset \\( K \\) of clients who have enough data records and whose consumption load varies enough to enrich the training data. This condition was added to ensure"
402
+ },
403
+ {
404
+ "type": "text",
405
+ "bbox": [
406
+ 0.505,
407
+ 0.062,
408
+ 0.923,
409
+ 0.215
410
+ ],
411
+ "angle": 0,
412
+ "content": "that we have enough variation in terms of data points to give a representation of the occupants' regular consumption. Afterward, every client \\( k \\) in the subset uses \\( n_k \\) examples from its local data. In our case, the volume is related to how long the smart meter has been generating data and how many of it is saved locally. The used dataset is composed of sliding windows with a predetermined number of look-back steps. SGD is then used by each client \\( k \\) to compute the average gradient \\( g_k \\), with a learning rate \\( \\eta \\). The updated models \\( w_k \\) are sent to the server to be aggregated."
413
+ },
414
+ {
415
+ "type": "code_caption",
416
+ "bbox": [
417
+ 0.507,
418
+ 0.238,
419
+ 0.922,
420
+ 0.284
421
+ ],
422
+ "angle": 0,
423
+ "content": "Algorithm 1 Federated Averaging Algorithm. \\( r_{max} \\) is the maximum number of rounds. \\( \\eta \\) is the learning rate and \\( N = \\sum_{k} n_{k} \\)"
424
+ },
425
+ {
426
+ "type": "algorithm",
427
+ "bbox": [
428
+ 0.508,
429
+ 0.286,
430
+ 0.922,
431
+ 0.606
432
+ ],
433
+ "angle": 0,
434
+ "content": "1: initialize the model in training round \\(r = 0\\) \n2: \n3: while \\(r < r_{max}\\) do \n4: Select subset \\(K\\) of clients; \n5: \n6: for client \\(k\\) in \\(K\\) do \n7: if \\(\\sigma(\\text{monthlyload}) > \\text{threshold}\\) then \n8: \\(k\\) receives model \\(w_{r}\\); \n9: \n10: \\(k\\) computes average gradient \\(g_{k}\\) with SGD; \n11: \n12: \\(k\\) updates local model \n13: \\(w_{r+1}^{k} \\gets w_{r}^{k} - \\eta g_{k}\\); \n14: \n15: \\(k\\) sends updated model to server; \n16: end if \n17: end for \n18: server computes new global model using the equation \\(:w_{r+1} \\gets \\sum_{k=0}^{K} \\frac{n_{k}}{N} w_{r+1}^{k}\\); \n19: start next round \\(r \\gets r + 1\\); \n20: end while"
435
+ },
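
A compact NumPy rendering of one round of Algorithm 1: each selected client trains locally, and the server averages the returned weights in proportion to n_k / N. The sketch assumes clients have already passed the monthly-load variance check, flattens model weights into a single vector, and uses `local_update` as a stand-in for the on-device SGD gradient computation; the learning rate value is arbitrary.

```python
import numpy as np

def federated_round(global_w, client_datasets, local_update, eta=0.01):
    """One round of FederatedAveraging over the selected subset K."""
    updated, sizes = [], []
    for data in client_datasets:              # each client k in K
        g_k = local_update(global_w, data)    # average gradient via SGD
        updated.append(global_w - eta * g_k)  # w_{r+1}^k = w_r^k - eta * g_k
        sizes.append(len(data))               # n_k local examples
    n_total = float(sum(sizes))               # N = sum_k n_k
    # w_{r+1} = sum_k (n_k / N) * w_{r+1}^k
    return sum((n / n_total) * w for n, w in zip(sizes, updated))
```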
436
+ {
437
+ "type": "text",
438
+ "bbox": [
439
+ 0.505,
440
+ 0.621,
441
+ 0.923,
442
+ 0.756
443
+ ],
444
+ "angle": 0,
445
+ "content": "However, the centralized model may not fit all the users' electrical consumption. A proposed solution to this problem is Personalization. Personalization is the focus of many applications that require understanding user behaviour and adapting to it. It consists on retraining the centralized model using user-specific data to build a personalized model for each user. This can be achieved through retraining the model for a small number of epochs locally using exclusively the user's data [24]."
446
+ },
447
+ {
448
+ "type": "text",
449
+ "bbox": [
450
+ 0.505,
451
+ 0.757,
452
+ 0.923,
453
+ 0.908
454
+ ],
455
+ "angle": 0,
456
+ "content": "Federated learning has fewer privacy risks than centralized server storage, since even when data are anonymized, the users' identities are still at risk and can be discovered through reverse engineering. The model updates sent by each client are ephemeral and never stored on the server; weight updates are processed in memory and are discarded after aggregation. The federated learning procedure requires that the individual weight uploads will not be inspected or analyzed. This is still more-secure than server training because the network and the server cannot be entrusted with fine-grained user data. Some"
457
+ }
458
+ ],
459
+ [
460
+ {
461
+ "type": "text",
462
+ "bbox": [
463
+ 0.074,
464
+ 0.063,
465
+ 0.493,
466
+ 0.125
467
+ ],
468
+ "angle": 0,
469
+ "content": "data still have to be sent in an aggregated form for billing, but these data do not reveal many details. Techniques such as secure aggregation [25] and differential privacy[26] are being explored to enforce trust requirements."
470
+ },
471
+ {
472
+ "type": "title",
473
+ "bbox": [
474
+ 0.076,
475
+ 0.14,
476
+ 0.26,
477
+ 0.155
478
+ ],
479
+ "angle": 0,
480
+ "content": "C. Networking Load Gain"
481
+ },
482
+ {
483
+ "type": "text",
484
+ "bbox": [
485
+ 0.074,
486
+ 0.163,
487
+ 0.491,
488
+ 0.224
489
+ ],
490
+ "angle": 0,
491
+ "content": "To evaluate the gain in network load in FL contrast to centralized training, we first define the network load \\( L_{sC} \\) for a server \\( s \\) in centralized training in Eq. 1 and the network load in FL \\( L_{sF} \\) in Eq. 2."
492
+ },
493
+ {
494
+ "type": "text",
495
+ "bbox": [
496
+ 0.075,
497
+ 0.223,
498
+ 0.492,
499
+ 0.269
500
+ ],
501
+ "angle": 0,
502
+ "content": "\\(S_{k - d}\\) is the size of data sent by the client \\(k\\) and \\(S_{m}\\) is the size of the model. In the centralized training, \\(d_{k}\\) is the number of hops between client \\(k\\) and the server."
503
+ },
504
+ {
505
+ "type": "equation",
506
+ "bbox": [
507
+ 0.205,
508
+ 0.279,
509
+ 0.49,
510
+ 0.322
511
+ ],
512
+ "angle": 0,
513
+ "content": "\\[\nL _ {s C} = \\sum_ {k = 1} ^ {N} S _ {k - d} \\times d _ {k} \\tag {1}\n\\]"
514
+ },
515
+ {
516
+ "type": "equation",
517
+ "bbox": [
518
+ 0.191,
519
+ 0.336,
520
+ 0.49,
521
+ 0.378
522
+ ],
523
+ "angle": 0,
524
+ "content": "\\[\nL _ {s F} = S _ {m} \\times \\sum_ {r = 1} ^ {r _ {\\max }} \\sum_ {k = 1} ^ {K} d _ {k, r} \\tag {2}\n\\]"
525
+ },
526
+ {
527
+ "type": "text",
528
+ "bbox": [
529
+ 0.075,
530
+ 0.387,
531
+ 0.49,
532
+ 0.432
533
+ ],
534
+ "angle": 0,
535
+ "content": "where \\( d_{k,r} \\) is the number of hops between the client \\( k \\) selected in round \\( r \\) and the server, and \\( K \\) is the number of users in each subset."
536
+ },
537
+ {
538
+ "type": "text",
539
+ "bbox": [
540
+ 0.075,
541
+ 0.433,
542
+ 0.49,
543
+ 0.462
544
+ ],
545
+ "angle": 0,
546
+ "content": "Using Eq.1 and Eq.2, we define the gain in networking load as follows:"
547
+ },
548
+ {
549
+ "type": "equation",
550
+ "bbox": [
551
+ 0.212,
552
+ 0.467,
553
+ 0.49,
554
+ 0.485
555
+ ],
556
+ "angle": 0,
557
+ "content": "\\[\nG _ {s} = 1 - L _ {s F} / L _ {s C} \\tag {3}\n\\]"
558
+ },
559
+ {
560
+ "type": "title",
561
+ "bbox": [
562
+ 0.17,
563
+ 0.497,
564
+ 0.396,
565
+ 0.51
566
+ ],
567
+ "angle": 0,
568
+ "content": "IV. SIMULATION AND RESULTS"
569
+ },
570
+ {
571
+ "type": "title",
572
+ "bbox": [
573
+ 0.075,
574
+ 0.522,
575
+ 0.424,
576
+ 0.538
577
+ ],
578
+ "angle": 0,
579
+ "content": "A. Dataset Pre-Processing and Evaluation Method"
580
+ },
581
+ {
582
+ "type": "text",
583
+ "bbox": [
584
+ 0.074,
585
+ 0.544,
586
+ 0.49,
587
+ 0.862
588
+ ],
589
+ "angle": 0,
590
+ "content": "This research was conducted using data from Pecan Street Inc. Dataport site. Dataport contains unique, circuit-level electricity use data at one-minute to one-second intervals for approximately 800 homes in the United States, with Photovoltaics generation and Electrical Vehicles charging data for a subset of these homes [27]. We chose a subset of 200 clients who have similar properties from this dataset. It is composed of the same kind of houses (detached-family homes), located in the same area (Texas). The dataset is composed of records between January 1st 2019 and March 31st 2019 with a one-hour resolution data. The weather fluctuations in this period are low, so the seasonal factor can be ignored in this study. The data of each client is prepared to be ready for further analysis. First, we transform the data to be in a scale between 0 and 1. Then we transform the time series into sliding windows with look-backs of size 12 and a look-ahead of size 1. Finally, we split data into train and test subsets (90% for training and 10% for test). We also split the clients into two groups: 180 participating in the federated learning process, and 20 are left for further evaluation for how well the model can fit non-participating clients."
591
+ },
592
+ {
593
+ "type": "text",
594
+ "bbox": [
595
+ 0.074,
596
+ 0.863,
597
+ 0.491,
598
+ 0.909
599
+ ],
600
+ "angle": 0,
601
+ "content": "We use RMSE and MAPE to evaluate the model's performance with regard to the prediction error. RMSE allows us to quantify the error in terms of energy, while MAPE is a"
602
+ },
603
+ {
604
+ "type": "text",
605
+ "bbox": [
606
+ 0.506,
607
+ 0.063,
608
+ 0.923,
609
+ 0.093
610
+ ],
611
+ "angle": 0,
612
+ "content": "percentage quantifying the size of the error relative to the real value. The expressions of RMSE and MAPE are as follows:"
613
+ },
614
+ {
615
+ "type": "equation",
616
+ "bbox": [
617
+ 0.611,
618
+ 0.1,
619
+ 0.922,
620
+ 0.14
621
+ ],
622
+ "angle": 0,
623
+ "content": "\\[\nR M S E = \\sqrt {\\frac {\\sum_ {i = 1} ^ {P} \\left(y _ {i} - \\hat {y} _ {i}\\right) ^ {2}}{N}} \\tag {4}\n\\]"
624
+ },
625
+ {
626
+ "type": "equation",
627
+ "bbox": [
628
+ 0.608,
629
+ 0.165,
630
+ 0.921,
631
+ 0.207
632
+ ],
633
+ "angle": 0,
634
+ "content": "\\[\nM A P E = \\frac {100 \\%}{P} \\sum_ {i = 1} ^ {P} \\left| \\frac {y _ {i} - \\hat {y} _ {i}}{y _ {i}} \\right| \\tag{5}\n\\]"
635
+ },
636
+ {
637
+ "type": "text",
638
+ "bbox": [
639
+ 0.506,
640
+ 0.212,
641
+ 0.921,
642
+ 0.242
643
+ ],
644
+ "angle": 0,
645
+ "content": "where \\(\\hat{y}_i\\) is the predicted value, \\(y_i\\) is the actual value and \\(P\\) is the number of predicted values."
646
+ },
647
+ {
648
+ "type": "title",
649
+ "bbox": [
650
+ 0.507,
651
+ 0.251,
652
+ 0.654,
653
+ 0.266
654
+ ],
655
+ "angle": 0,
656
+ "content": "B. Simulations setup"
657
+ },
658
+ {
659
+ "type": "text",
660
+ "bbox": [
661
+ 0.506,
662
+ 0.27,
663
+ 0.922,
664
+ 0.33
665
+ ],
666
+ "angle": 0,
667
+ "content": "The simulations were conducted on a laptop with a 2,2 GHz Intel i7 processor and 16GB of memory and NVIDIA GeForce GTX 1070 graphic card. We used Tensorflow Federated 0.4.0 with Tensorflow 1.13.1 backend."
668
+ },
669
+ {
670
+ "type": "text",
671
+ "bbox": [
672
+ 0.505,
673
+ 0.331,
674
+ 0.922,
675
+ 0.573
676
+ ],
677
+ "angle": 0,
678
+ "content": "Hyper-parameter tuning in deep learning models is important to obtain the best forecasting performance. However, in this work, we only focus on evaluating the federated learning paradigm. Previous work shows performance insensitivity to combinations of some layers and layer size, as long as we use multiple layers and that the number of hidden nodes is sufficiently large [28]. It was also suggested that very deep networks are prone to under-fitting and vanishing gradients. Following these rules, the initial model hyper-parameters (e.g. number of layers, and time steps to be considered) were chosen by random search on a randomly selected client's data. The retained model has two LSTM hidden layers composed of 200 neurons each. The loss function used is Mean squared error and the optimiser chosen is Adam. The model converges around the 20th epoch and thus we use close values for rounds and epochs."
679
+ },
680
+ {
681
+ "type": "title",
682
+ "bbox": [
683
+ 0.507,
684
+ 0.582,
685
+ 0.658,
686
+ 0.595
687
+ ],
688
+ "angle": 0,
689
+ "content": "C. Numerical Results"
690
+ },
691
+ {
692
+ "type": "title",
693
+ "bbox": [
694
+ 0.525,
695
+ 0.601,
696
+ 0.688,
697
+ 0.614
698
+ ],
699
+ "angle": 0,
700
+ "content": "1) Evaluated scenarios:"
701
+ },
702
+ {
703
+ "type": "text",
704
+ "bbox": [
705
+ 0.505,
706
+ 0.616,
707
+ 0.922,
708
+ 0.721
709
+ ],
710
+ "angle": 0,
711
+ "content": "The different scenarios that were evaluated are summarized in Table I. As explained in the previous section, in each round, only a subset of clients train the model. We modify the number of clients in the subset selected in each round, to see the effect of larger subsets. We also vary the number of epochs of local training. In all the scenarios, the federated learning algorithm was run for 20 rounds."
712
+ },
713
+ {
714
+ "type": "table_caption",
715
+ "bbox": [
716
+ 0.641,
717
+ 0.738,
718
+ 0.788,
719
+ 0.75
720
+ ],
721
+ "angle": 0,
722
+ "content": "TABLE I: Used scenarios"
723
+ },
724
+ {
725
+ "type": "table",
726
+ "bbox": [
727
+ 0.574,
728
+ 0.754,
729
+ 0.852,
730
+ 0.817
731
+ ],
732
+ "angle": 0,
733
+ "content": "<table><tr><td>Scenarios</td><td>Clients in subset</td><td>Local Epochs</td></tr><tr><td>1</td><td>5</td><td>1</td></tr><tr><td>2</td><td>20</td><td>1</td></tr><tr><td>3</td><td>5</td><td>5</td></tr><tr><td>4</td><td>20</td><td>5</td></tr></table>"
734
+ },
735
+ {
736
+ "type": "title",
737
+ "bbox": [
738
+ 0.523,
739
+ 0.833,
740
+ 0.724,
741
+ 0.847
742
+ ],
743
+ "angle": 0,
744
+ "content": "2) Results for global models:"
745
+ },
746
+ {
747
+ "type": "text",
748
+ "bbox": [
749
+ 0.506,
750
+ 0.848,
751
+ 0.922,
752
+ 0.908
753
+ ],
754
+ "angle": 0,
755
+ "content": "The evaluated scenarios resulted in global models that are obtained following the federated learning approach. These models are evaluated in terms of RMSE and MAPE as shown in Tables II and III. Null consumption values have been"
756
+ }
757
+ ],
758
+ [
759
+ {
760
+ "type": "text",
761
+ "bbox": [
762
+ 0.074,
763
+ 0.062,
764
+ 0.493,
765
+ 0.321
766
+ ],
767
+ "angle": 0,
768
+ "content": "disgarded when calculating MAPE. Table II summarizes the results for the participating clients in the different scenarios. In our case, the load forecast is on a granular level (single house) and on a short term (1 hour), therefore the values of MAPE achieved in Table II for various models are reasonable, and this level of accuracy is anticipated as similar values have been reported by previous works [28, 29]. These works also report that the forecasting accuracy tends to be low for short-term forecasting horizons. One of the most notable things we notice is that the global model fits some clients better than others when considering the fact that not all clients have similar profiles. We also notice that selecting a bigger number of clients in each round is preferable, but in cases where sending updates is more expensive in terms of networking, the difference can be compensated by using more local training epochs. The results are similar when applied to the set of clients who did not participate in the training."
769
+ },
770
+ {
771
+ "type": "table_caption",
772
+ "bbox": [
773
+ 0.075,
774
+ 0.344,
775
+ 0.49,
776
+ 0.369
777
+ ],
778
+ "angle": 0,
779
+ "content": "TABLE II: Resulting RMSE and MAPE for global models in the considered scenarios for the 180 participating clients"
780
+ },
781
+ {
782
+ "type": "table",
783
+ "bbox": [
784
+ 0.081,
785
+ 0.373,
786
+ 0.481,
787
+ 0.447
788
+ ],
789
+ "angle": 0,
790
+ "content": "<table><tr><td></td><td colspan=\"3\">RMSE</td><td colspan=\"3\">MAPE</td></tr><tr><td>Scenario</td><td>Min</td><td>Max</td><td>Mean</td><td>Min</td><td>Max</td><td>Mean</td></tr><tr><td>1</td><td>0.070</td><td>2.652</td><td>0.605</td><td>10.65%</td><td>83.35%</td><td>41.40%</td></tr><tr><td>2</td><td>0.045</td><td>2.55</td><td>0.578</td><td>9.18%</td><td>87.63%</td><td>38.39%</td></tr><tr><td>3</td><td>0.026</td><td>2.652</td><td>0.576</td><td>9.45%</td><td>96.84%</td><td>37.43%</td></tr><tr><td>4</td><td>0.047</td><td>2.68</td><td>0.583</td><td>9.71%</td><td>93.74%</td><td>38.91%</td></tr></table>"
791
+ },
792
+ {
793
+ "type": "table_caption",
794
+ "bbox": [
795
+ 0.075,
796
+ 0.477,
797
+ 0.489,
798
+ 0.501
799
+ ],
800
+ "angle": 0,
801
+ "content": "TABLE III: Resulting RMSE and MAPE for global models in the considered scenarios for the 20 non-participant clients"
802
+ },
803
+ {
804
+ "type": "table",
805
+ "bbox": [
806
+ 0.081,
807
+ 0.505,
808
+ 0.481,
809
+ 0.579
810
+ ],
811
+ "angle": 0,
812
+ "content": "<table><tr><td></td><td colspan=\"3\">RMSE</td><td colspan=\"3\">MAPE</td></tr><tr><td>Scenario</td><td>Min</td><td>Max</td><td>Mean</td><td>Min</td><td>Max</td><td>Mean</td></tr><tr><td>1</td><td>0.262</td><td>1.024</td><td>0.589</td><td>15.82%</td><td>60.72%</td><td>44.98%</td></tr><tr><td>2</td><td>0.241</td><td>0.979</td><td>0.550</td><td>16.08%</td><td>55.34%</td><td>40.95%</td></tr><tr><td>3</td><td>0.229</td><td>0.99</td><td>0.530</td><td>15.78%</td><td>53.98%</td><td>39.18%</td></tr><tr><td>4</td><td>0.235</td><td>1.004</td><td>0.543</td><td>16.04%</td><td>56.61%</td><td>41.15%</td></tr></table>"
813
+ },
814
+ {
815
+ "type": "title",
816
+ "bbox": [
817
+ 0.091,
818
+ 0.591,
819
+ 0.316,
820
+ 0.605
821
+ ],
822
+ "angle": 0,
823
+ "content": "3) Behaviour of personalization:"
824
+ },
825
+ {
826
+ "type": "text",
827
+ "bbox": [
828
+ 0.074,
829
+ 0.606,
830
+ 0.491,
831
+ 0.877
832
+ ],
833
+ "angle": 0,
834
+ "content": "In this section, we study the effect of personalization on the performance of the models. First we test if re-training the model locally for the participant clients gives better results. Then we apply the same thing to the set of clients who did not participate in the training. The models were retrained for 5 epochs for each client. Results for the set of clients participating in the training are summarized in Table IV and for the non-participating clients in Table V. We notice an overall improvement of most of the models. For example, the model 1 has an overall improvement of \\(5.07\\%\\) in terms of MAPE for the participating set of clients and of \\(4.78\\%\\) on the non-participating clients set. However, for some clients, the performance can not be improved despite retraining, and this, as we mentioned earlier, is related to the quality of historical data points. Applying the models to these clients' consumption profiles results in very high MAPE, which affects the average results. These clients should be treated as outliers, nonetheless, this is beyond the scope of this study."
835
+ },
836
+ {
837
+ "type": "text",
838
+ "bbox": [
839
+ 0.074,
840
+ 0.878,
841
+ 0.49,
842
+ 0.909
843
+ ],
844
+ "angle": 0,
845
+ "content": "To illustrate the improvements on predictions using personalization, we randomly selected a client from the participant"
846
+ },
847
+ {
848
+ "type": "table_caption",
849
+ "bbox": [
850
+ 0.507,
851
+ 0.065,
852
+ 0.922,
853
+ 0.089
854
+ ],
855
+ "angle": 0,
856
+ "content": "TABLE IV: Resulting RMSE and MAPE after personalization over 180 clients"
857
+ },
858
+ {
859
+ "type": "table",
860
+ "bbox": [
861
+ 0.521,
862
+ 0.094,
863
+ 0.905,
864
+ 0.168
865
+ ],
866
+ "angle": 0,
867
+ "content": "<table><tr><td></td><td colspan=\"3\">RMSE</td><td colspan=\"3\">MAPE</td></tr><tr><td>Scenario</td><td>Min</td><td>Max</td><td>Mean</td><td>Min</td><td>Max</td><td>Mean</td></tr><tr><td>1</td><td>0.0</td><td>2.47</td><td>0.550</td><td>8.13%</td><td>99.16%</td><td>36.33%</td></tr><tr><td>2</td><td>0.0</td><td>2.47</td><td>0.551</td><td>7.89%</td><td>91.23%</td><td>36.39%</td></tr><tr><td>3</td><td>0.0</td><td>2.371</td><td>0.536</td><td>7.64%</td><td>88.76%</td><td>34.27%</td></tr><tr><td>4</td><td>0.0</td><td>2.375</td><td>0.536</td><td>8.00%</td><td>82.14%</td><td>34.14%</td></tr></table>"
868
+ },
869
+ {
870
+ "type": "table_caption",
871
+ "bbox": [
872
+ 0.507,
873
+ 0.188,
874
+ 0.921,
875
+ 0.213
876
+ ],
877
+ "angle": 0,
878
+ "content": "TABLE V: Resulting RMSE and MAPE after personalization for 20 non-participating clients"
879
+ },
880
+ {
881
+ "type": "table",
882
+ "bbox": [
883
+ 0.514,
884
+ 0.216,
885
+ 0.912,
886
+ 0.29
887
+ ],
888
+ "angle": 0,
889
+ "content": "<table><tr><td></td><td colspan=\"3\">RMSE</td><td colspan=\"3\">MAPE</td></tr><tr><td>Scenario</td><td>Min</td><td>Max</td><td>Mean</td><td>Min</td><td>Max</td><td>Mean</td></tr><tr><td>1</td><td>0.232</td><td>0.905</td><td>0.516</td><td>18.35%</td><td>53.70%</td><td>40.20%</td></tr><tr><td>2</td><td>0.233</td><td>0.901</td><td>0.516</td><td>16.99%</td><td>58.68%</td><td>40.71%</td></tr><tr><td>3</td><td>0.235</td><td>0.909</td><td>0.516</td><td>15.79%</td><td>54.82%</td><td>39.49%</td></tr><tr><td>4</td><td>0.232</td><td>0.907</td><td>0.509</td><td>15.96%</td><td>52.96%</td><td>39.01%</td></tr></table>"
890
+ },
891
+ {
892
+ "type": "text",
893
+ "bbox": [
894
+ 0.505,
895
+ 0.317,
896
+ 0.921,
897
+ 0.392
898
+ ],
899
+ "angle": 0,
900
+ "content": "set (client 4313) and a client from the non-participant set (client 8467). We applied the global model 4 and the corresponding personalized models. The actual load profiles and the predicted profiles are shown in Fig.3 and Fig.4. Both models fit the overall behaviour of the consumption profiles."
901
+ },
902
+ {
903
+ "type": "text",
904
+ "bbox": [
905
+ 0.505,
906
+ 0.393,
907
+ 0.922,
908
+ 0.527
909
+ ],
910
+ "angle": 0,
911
+ "content": "We conclude that we can indeed train powerful models for a population's consumption profiles using only a subset of the users forming it. For applications that have high accuracy requirements, the model can be retrained resulting in a personalized model that follows the profile's curves better, yielding more accurate predictions. Nonetheless, the predictions obtained with the global model can be a good starting point for new clients who don't have enough data for personalization."
912
+ },
913
+ {
914
+ "type": "title",
915
+ "bbox": [
916
+ 0.511,
917
+ 0.529,
918
+ 0.687,
919
+ 0.542
920
+ ],
921
+ "angle": 0,
922
+ "content": "4) Gain in network load:"
923
+ },
924
+ {
925
+ "type": "text",
926
+ "bbox": [
927
+ 0.505,
928
+ 0.543,
929
+ 0.922,
930
+ 0.665
931
+ ],
932
+ "angle": 0,
933
+ "content": "To illustrate the gain in the network load, we can consider the most basic case where the distance between all the clients and the MEC server is 1-Hop. The size of the model is \\(1,9\\mathrm{Kb}\\) and the size of the used data is \\(16\\mathrm{Mb}\\). Using Eq.3, the gain in the scenarios 1 and 3 is \\(97\\%\\), while scenarios 2 and 4 result in a gain of \\(90\\%\\). This is a significant gain, especially when considering that the approach could be applied at the scale of a city or bigger, for example."
934
+ },
935
+ {
936
+ "type": "title",
937
+ "bbox": [
938
+ 0.602,
939
+ 0.677,
940
+ 0.825,
941
+ 0.691
942
+ ],
943
+ "angle": 0,
944
+ "content": "V. REMARKS & FUTURE WORK"
945
+ },
946
+ {
947
+ "type": "text",
948
+ "bbox": [
949
+ 0.505,
950
+ 0.698,
951
+ 0.922,
952
+ 0.819
953
+ ],
954
+ "angle": 0,
955
+ "content": "The feasibility of the proposed approach is dependent on the capabilities of the edge devices to perform local training. New IoT devices have sufficient computing hardware to run complex machine learning models, but training a neural network is very likely to compromise device performance. However, some lightweight machine learning frameworks have emerged such as Tensorflow Lite<sup>1</sup> which provides solid ground for future implementations."
956
+ },
957
+ {
958
+ "type": "text",
959
+ "bbox": [
960
+ 0.505,
961
+ 0.82,
962
+ 0.922,
963
+ 0.881
964
+ ],
965
+ "angle": 0,
966
+ "content": "The accuracy of the models, even after personalization, still varies depending on the user. To improve the results, neural networks should be coupled with other methods, such as a prior clustering of clients using criteria other than the"
967
+ },
968
+ {
969
+ "type": "page_footnote",
970
+ "bbox": [
971
+ 0.521,
972
+ 0.894,
973
+ 0.692,
974
+ 0.908
975
+ ],
976
+ "angle": 0,
977
+ "content": "1 https://www.tensorflow.org/lite"
978
+ }
979
+ ],
980
+ [
981
+ {
982
+ "type": "image",
983
+ "bbox": [
984
+ 0.087,
985
+ 0.064,
986
+ 0.504,
987
+ 0.25
988
+ ],
989
+ "angle": 0,
990
+ "content": null
991
+ },
992
+ {
993
+ "type": "image_caption",
994
+ "bbox": [
995
+ 0.075,
996
+ 0.252,
997
+ 0.49,
998
+ 0.287
999
+ ],
1000
+ "angle": 0,
1001
+ "content": "Fig. 3: Predictions for next hour consumption for client 4313 who participated in training the global model 4. Local training for 5 epochs reduced RMSE from \\(0.55\\mathrm{kW}\\) to \\(0.388\\mathrm{kW}\\)."
1002
+ },
1003
+ {
1004
+ "type": "image",
1005
+ "bbox": [
1006
+ 0.102,
1007
+ 0.318,
1008
+ 0.466,
1009
+ 0.505
1010
+ ],
1011
+ "angle": 0,
1012
+ "content": null
1013
+ },
1014
+ {
1015
+ "type": "image_caption",
1016
+ "bbox": [
1017
+ 0.075,
1018
+ 0.511,
1019
+ 0.49,
1020
+ 0.547
1021
+ ],
1022
+ "angle": 0,
1023
+ "content": "Fig. 4: Predictions for next hour consumption for client 8467 who did not participate in training the global model 4. Local training for 5 epochs reduced RMSE from \\(0.8\\mathrm{kW}\\) to \\(0.72\\mathrm{kW}\\)."
1024
+ },
1025
+ {
1026
+ "type": "text",
1027
+ "bbox": [
1028
+ 0.075,
1029
+ 0.587,
1030
+ 0.489,
1031
+ 0.618
1032
+ ],
1033
+ "angle": 0,
1034
+ "content": "geographical proximity. Solving the problem of outliers in this context should also be investigated."
1035
+ },
1036
+ {
1037
+ "type": "title",
1038
+ "bbox": [
1039
+ 0.218,
1040
+ 0.638,
1041
+ 0.347,
1042
+ 0.652
1043
+ ],
1044
+ "angle": 0,
1045
+ "content": "VI. CONCLUSION"
1046
+ },
1047
+ {
1048
+ "type": "text",
1049
+ "bbox": [
1050
+ 0.074,
1051
+ 0.666,
1052
+ 0.49,
1053
+ 0.909
1054
+ ],
1055
+ "angle": 0,
1056
+ "content": "Individual short-term load forecasting is a challenging task considering the stochastic nature of consumption profiles. In this paper, we proposed a system model using Edge computing and federated learning to tackle privacy and data diversity challenges related to short-term load forecasting in the smart grid. To the best of our knowledge, this represents one of the first studies of federated learning in the smart grid context. Unlike centralized methods, in the proposed system federated learning uses edge devices to train models, hence reducing security risks to the ones related to the device only. We conducted experiments to evaluate the performance of both centralized and personalized models in federated settings. The simulations results show that it is a promising approach to create highly performing models with a significantly reduced networking load compared to a centralised model, while preserving the privacy of consumption data."
1057
+ },
1058
+ {
1059
+ "type": "title",
1060
+ "bbox": [
1061
+ 0.622,
1062
+ 0.063,
1063
+ 0.808,
1064
+ 0.077
1065
+ ],
1066
+ "angle": 0,
1067
+ "content": "ACKNOWLEDGEMENT"
1068
+ },
1069
+ {
1070
+ "type": "text",
1071
+ "bbox": [
1072
+ 0.507,
1073
+ 0.09,
1074
+ 0.922,
1075
+ 0.136
1076
+ ],
1077
+ "angle": 0,
1078
+ "content": "The authors would like to thank the Natural Sciences and Engineering Research Council of Canada, for the financial support of this research."
1079
+ },
1080
+ {
1081
+ "type": "title",
1082
+ "bbox": [
1083
+ 0.666,
1084
+ 0.157,
1085
+ 0.762,
1086
+ 0.17
1087
+ ],
1088
+ "angle": 0,
1089
+ "content": "REFERENCES"
1090
+ },
1091
+ {
1092
+ "type": "ref_text",
1093
+ "bbox": [
1094
+ 0.515,
1095
+ 0.182,
1096
+ 0.922,
1097
+ 0.216
1098
+ ],
1099
+ "angle": 0,
1100
+ "content": "[1] Elena Mocanu, Phuong H. Nguyen, Madeleine Gibescu, and Wil L. Kling. Deep learning for estimating building energy consumption. Sustainable Energy, Grids and Networks, 6:91-99, June 2016."
1101
+ },
1102
+ {
1103
+ "type": "ref_text",
1104
+ "bbox": [
1105
+ 0.516,
1106
+ 0.217,
1107
+ 0.921,
1108
+ 0.25
1109
+ ],
1110
+ "angle": 0,
1111
+ "content": "[2] W. Kong, Z. Y. Dong, D. J. Hill, F. Luo, and Y. Xu. Short-Term Residential Load Forecasting Based on Resident Behaviour Learning. IEEE Transactions on Power Systems, 33(1):1087-1088, January 2018."
1112
+ },
1113
+ {
1114
+ "type": "ref_text",
1115
+ "bbox": [
1116
+ 0.517,
1117
+ 0.251,
1118
+ 0.921,
1119
+ 0.283
1120
+ ],
1121
+ "angle": 0,
1122
+ "content": "[3] Dhaou Said et al. Advanced scheduling protocol for electric vehicle home charging with time-of-use pricing. pages 6272-6276, June 2013. ISSN: 1938-1883."
1123
+ },
1124
+ {
1125
+ "type": "ref_text",
1126
+ "bbox": [
1127
+ 0.517,
1128
+ 0.284,
1129
+ 0.921,
1130
+ 0.317
1131
+ ],
1132
+ "angle": 0,
1133
+ "content": "[4] A. Almalaq and J. J. Zhang. Evolutionary Deep Learning-Based Energy Consumption Prediction for Buildings. IEEE Access, 7:1520-1531, 2019."
1134
+ },
1135
+ {
1136
+ "type": "ref_text",
1137
+ "bbox": [
1138
+ 0.517,
1139
+ 0.318,
1140
+ 0.921,
1141
+ 0.363
1142
+ ],
1143
+ "angle": 0,
1144
+ "content": "[5] Salah Bouktif, Ali Fiaz, Ali Ouni, and Mohamed Adel Serhani. Optimal Deep Learning LSTM Model for Electric Load Forecasting using Feature Selection and Genetic Algorithm: Comparison with Machine Learning Approaches †. Energies, 11(7):1636, July 2018."
1145
+ },
1146
+ {
1147
+ "type": "ref_text",
1148
+ "bbox": [
1149
+ 0.517,
1150
+ 0.364,
1151
+ 0.921,
1152
+ 0.407
1153
+ ],
1154
+ "angle": 0,
1155
+ "content": "[6] Abderrahime Filali et al. Prediction-Based Switch Migration Scheduling for SDN Load Balancing. In ICC 2019 - 2019 IEEE International Conference on Communications (ICC), pages 1-6, May 2019. ISSN: 1938-1883."
1156
+ },
1157
+ {
1158
+ "type": "ref_text",
1159
+ "bbox": [
1160
+ 0.517,
1161
+ 0.409,
1162
+ 0.921,
1163
+ 0.442
1164
+ ],
1165
+ "angle": 0,
1166
+ "content": "[7] Yanbo Huang. Advances in Artificial Neural Networks - Methodological Development and Application. Algorithms, 2(3):973-1007, September 2009."
1167
+ },
1168
+ {
1169
+ "type": "ref_text",
1170
+ "bbox": [
1171
+ 0.517,
1172
+ 0.444,
1173
+ 0.921,
1174
+ 0.487
1175
+ ],
1176
+ "angle": 0,
1177
+ "content": "[8] B. Stephen, X. Tang, P. R. Harvey, S. Galloway, and K. I. Jennett. Incorporating Practice Theory in Sub-Profile Models for Short Term Aggregated Residential Load Forecasting. IEEE Transactions on Smart Grid, 8(4):1591-1598, July 2017."
1178
+ },
1179
+ {
1180
+ "type": "ref_text",
1181
+ "bbox": [
1182
+ 0.517,
1183
+ 0.488,
1184
+ 0.921,
1185
+ 0.522
1186
+ ],
1187
+ "angle": 0,
1188
+ "content": "[9] H. Shi, M. Xu, and R. Li. Deep Learning for Household Load Forecasting—A Novel Pooling Deep RNN. IEEE Transactions on Smart Grid, 9(5):5271-5280, September 2018."
1189
+ },
1190
+ {
1191
+ "type": "ref_text",
1192
+ "bbox": [
1193
+ 0.51,
1194
+ 0.523,
1195
+ 0.921,
1196
+ 0.554
1197
+ ],
1198
+ "angle": 0,
1199
+ "content": "[10] H. Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. Communication-efficient learning of deep networks from decentralized data."
1200
+ },
1201
+ {
1202
+ "type": "ref_text",
1203
+ "bbox": [
1204
+ 0.51,
1205
+ 0.556,
1206
+ 0.921,
1207
+ 0.578
1208
+ ],
1209
+ "angle": 0,
1210
+ "content": "[11] S. Hochreiter and J. Schmidhuber. Long short-term memory. *Neural Computation*, 9(8):1735-1780, November 1997."
1211
+ },
1212
+ {
1213
+ "type": "ref_text",
1214
+ "bbox": [
1215
+ 0.51,
1216
+ 0.579,
1217
+ 0.921,
1218
+ 0.623
1219
+ ],
1220
+ "angle": 0,
1221
+ "content": "[12] Salah Bouktif, Ali Fiaz, Ali Ouni, and Mohamed Adel Serhani. Optimal Deep Learning LSTM Model for Electric Load Forecasting using Feature Selection and Genetic Algorithm: Comparison with Machine Learning Approaches †. Energies, 11(7):1636, July 2018."
1222
+ },
1223
+ {
1224
+ "type": "ref_text",
1225
+ "bbox": [
1226
+ 0.51,
1227
+ 0.624,
1228
+ 0.921,
1229
+ 0.669
1230
+ ],
1231
+ "angle": 0,
1232
+ "content": "[13] Jian Zheng, Cencen Xu, Ziang Zhang, and Xiaohua Li. Electric load forecasting in smart grids using Long-Short-Term-Memory based Recurrent Neural Network. In 2017 51st Annual Conference on Information Sciences and Systems (CISS), pages 1-6, March 2017."
1233
+ },
1234
+ {
1235
+ "type": "ref_text",
1236
+ "bbox": [
1237
+ 0.51,
1238
+ 0.67,
1239
+ 0.921,
1240
+ 0.713
1241
+ ],
1242
+ "angle": 0,
1243
+ "content": "[14] D. L. Marino, K. Amarasinghe, and M. Manic. Building energy load forecasting using Deep Neural Networks. In *IECON 2016 - 42nd Annual Conference of the IEEE Industrial Electronics Society*, pages 7046-7051, October 2016."
1244
+ },
1245
+ {
1246
+ "type": "ref_text",
1247
+ "bbox": [
1248
+ 0.51,
1249
+ 0.715,
1250
+ 0.921,
1251
+ 0.758
1252
+ ],
1253
+ "angle": 0,
1254
+ "content": "[15] Guangya Zhu, Tin-Tai Chow, and Norman Tse. Short-term load forecasting coupled with weather profile generation methodology. Building Services Engineering Research and Technology, 39(3):310-327, May 2018."
1255
+ },
1256
+ {
1257
+ "type": "ref_text",
1258
+ "bbox": [
1259
+ 0.51,
1260
+ 0.759,
1261
+ 0.921,
1262
+ 0.793
1263
+ ],
1264
+ "angle": 0,
1265
+ "content": "[16] Dhaou Said et al. Scheduling protocol with load management for EV charging. In 2014 IEEE Global Communications Conference, pages 362-367, December 2014. ISSN: 1930-529X."
1266
+ },
1267
+ {
1268
+ "type": "ref_text",
1269
+ "bbox": [
1270
+ 0.51,
1271
+ 0.794,
1272
+ 0.921,
1273
+ 0.828
1274
+ ],
1275
+ "angle": 0,
1276
+ "content": "[17] Jihene Rezgui et al. Smart charge scheduling for EVs based on two-way communication. In 2017 IEEE International Conference on Communications (ICC), pages 1-6, May 2017. ISSN: 1938-1883."
1277
+ },
1278
+ {
1279
+ "type": "ref_text",
1280
+ "bbox": [
1281
+ 0.51,
1282
+ 0.829,
1283
+ 0.921,
1284
+ 0.871
1285
+ ],
1286
+ "angle": 0,
1287
+ "content": "[18] P. Kumar, Y. Lin, G. Bai, A. Paverd, J. S. Dong, and A. Martin. Smart Grid Metering Networks: A Survey on Security, Privacy and Open Research Issues. IEEE Communications Surveys Tutorials, pages 1-1, 2019."
1288
+ },
1289
+ {
1290
+ "type": "ref_text",
1291
+ "bbox": [
1292
+ 0.51,
1293
+ 0.872,
1294
+ 0.921,
1295
+ 0.907
1296
+ ],
1297
+ "angle": 0,
1298
+ "content": "[19] M. Badra and S. Zeadally. Design and Performance Analysis of a Virtual Ring Architecture for Smart Grid Privacy. IEEE Transactions on Information Forensics and Security, 9(2):321-329, February 2014."
1299
+ },
1300
+ {
1301
+ "type": "list",
1302
+ "bbox": [
1303
+ 0.51,
1304
+ 0.182,
1305
+ 0.922,
1306
+ 0.907
1307
+ ],
1308
+ "angle": 0,
1309
+ "content": null
1310
+ }
1311
+ ],
1312
+ [
1313
+ {
1314
+ "type": "ref_text",
1315
+ "bbox": [
1316
+ 0.077,
1317
+ 0.064,
1318
+ 0.49,
1319
+ 0.099
1320
+ ],
1321
+ "angle": 0,
1322
+ "content": "[20] Y. Gong, Y. Cai, Y. Guo, and Y. Fang. A Privacy-Preserving Scheme for Incentive-Based Demand Response in the Smart Grid. IEEE Transactions on Smart Grid, 7(3):1304-1313, May 2016."
1323
+ },
1324
+ {
1325
+ "type": "ref_text",
1326
+ "bbox": [
1327
+ 0.077,
1328
+ 0.099,
1329
+ 0.49,
1330
+ 0.144
1331
+ ],
1332
+ "angle": 0,
1333
+ "content": "[21] H. Park, H. Kim, K. Chun, J. Lee, S. Lim, and I. Yie. Untraceability of Group Signature Schemes based on Bilinear Mapping and Their Improvement. In Fourth International Conference on Information Technology (ITNG'07), pages 747-753, April 2007."
1334
+ },
1335
+ {
1336
+ "type": "ref_text",
1337
+ "bbox": [
1338
+ 0.078,
1339
+ 0.145,
1340
+ 0.49,
1341
+ 0.2
1342
+ ],
1343
+ "angle": 0,
1344
+ "content": "[22] Quoc-Viet Pham, Fang Fang, Vu Nguyen Ha, Mai Le, Zhiguo Ding, Long Bao Le, and Won-Joo Hwang. A Survey of Multi-Access Edge Computing in 5g and Beyond: Fundamentals, Technology Integration, and State-of-the-Art. arXiv:1906.08452 [cs, math], June 2019. arXiv: 1906.08452."
1345
+ },
1346
+ {
1347
+ "type": "ref_text",
1348
+ "bbox": [
1349
+ 0.078,
1350
+ 0.201,
1351
+ 0.49,
1352
+ 0.234
1353
+ ],
1354
+ "angle": 0,
1355
+ "content": "[23] Andrew Hard, Kanishka Rao, Rajiv Mathews, Swaroop Ramaswamy, Françoise Beaufays, Sean Augenstein, Hubert Eichner, Chloe Kiddon, and Daniel Ramage. Federated learning for mobile keyboard prediction."
1356
+ },
1357
+ {
1358
+ "type": "ref_text",
1359
+ "bbox": [
1360
+ 0.078,
1361
+ 0.235,
1362
+ 0.49,
1363
+ 0.28
1364
+ ],
1365
+ "angle": 0,
1366
+ "content": "[24] Khe Chai Sim, Petr Zadrazil, and Françoise Beaufays. An Investigation into On-Device Personalization of End-to-End Automatic Speech Recognition Models. In Interspeech 2019, pages 774-778. ISCA, September 2019."
1367
+ },
1368
+ {
1369
+ "type": "ref_text",
1370
+ "bbox": [
1371
+ 0.078,
1372
+ 0.281,
1373
+ 0.49,
1374
+ 0.336
1375
+ ],
1376
+ "angle": 0,
1377
+ "content": "[25] Keith Bonawitz, Vladimir Ivanov, Ben Kreuter, Antonio Marcedone, H. Brendan McMahan, Sarvar Patel, Daniel Ramage, Aaron Segal, and Karn Seth. Practical Secure Aggregation for Federated Learning on User-Held Data. arXiv:1611.04482 [cs, stat], November 2016. arXiv: 1611.04482."
1378
+ },
1379
+ {
1380
+ "type": "ref_text",
1381
+ "bbox": [
1382
+ 0.078,
1383
+ 0.337,
1384
+ 0.49,
1385
+ 0.371
1386
+ ],
1387
+ "angle": 0,
1388
+ "content": "[26] Robin C. Geyer, Tassilo Klein, and Moin Nabi. Differentially Private Federated Learning: A Client Level Perspective. arXiv:1712.07557 [cs, stat], December 2017. arXiv: 1712.07557."
1389
+ },
1390
+ {
1391
+ "type": "ref_text",
1392
+ "bbox": [
1393
+ 0.078,
1394
+ 0.372,
1395
+ 0.49,
1396
+ 0.382
1397
+ ],
1398
+ "angle": 0,
1399
+ "content": "[27] Pecan street inc. dataport 2019 [online] https://dataport.pecanstreet.org/."
1400
+ },
1401
+ {
1402
+ "type": "ref_text",
1403
+ "bbox": [
1404
+ 0.078,
1405
+ 0.383,
1406
+ 0.49,
1407
+ 0.427
1408
+ ],
1409
+ "angle": 0,
1410
+ "content": "[28] W. Kong, Z. Y. Dong, Y. Jia, D. J. Hill, Y. Xu, and Y. Zhang. ShortTerm Residential Load Forecasting Based on LSTM Recurrent Neural Network. IEEE Transactions on Smart Grid, 10(1):841-851, January 2019."
1411
+ },
1412
+ {
1413
+ "type": "ref_text",
1414
+ "bbox": [
1415
+ 0.078,
1416
+ 0.428,
1417
+ 0.49,
1418
+ 0.472
1419
+ ],
1420
+ "angle": 0,
1421
+ "content": "[29] Matthew Rowe, Timur Yunusov, Stephen Haben, William Holderbaum, and Ben Potter. The Real-Time Optimisation of DNO Owned Storage Devices on the LV Network for Peak Reduction. Energies, 7(6):3537-3560, June 2014."
1422
+ },
1423
+ {
1424
+ "type": "list",
1425
+ "bbox": [
1426
+ 0.077,
1427
+ 0.064,
1428
+ 0.49,
1429
+ 0.472
1430
+ ],
1431
+ "angle": 0,
1432
+ "content": null
1433
+ }
1434
+ ]
1435
+ ]
2201.11xxx/2201.11248/a74eab91-73ff-44e2-9768-88fdde72bd1e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4270b5ece07c753f37021bd2b25e453619dd786fd601a7d2bede634a5ac3747b
3
+ size 499634
2201.11xxx/2201.11248/full.md ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Electrical Load Forecasting Using Edge Computing and Federated Learning
2
+
3
+ Afaf Taik and Soumaya Cherkaoui
4
+
5
+ INTERLAB, Engineering Faculty, Université de Sherbrooke, Canada.
6
+
7
+ {afaf.taik, soumaya.cherkaoui}@usherbrooke.ca
8
+
9
+ Abstract-In the smart grid, huge amounts of consumption data are used to train deep learning models for applications such as load monitoring and demand response. However, these applications raise concerns regarding security and have high accuracy requirements. On the one hand, the data used is privacy-sensitive. For instance, the fine-grained data collected by a smart meter at a consumer's home may reveal information on the appliances and thus the consumer's behaviour at home. On the other hand, deep learning models require large volumes of sufficiently varied data to be trained adequately. In this paper, we evaluate the use of Edge computing and federated learning, a decentralized machine learning scheme that makes it possible to increase the volume and diversity of the data used to train deep learning models without compromising privacy. This paper reports, to the best of our knowledge, the first use of federated learning for household load forecasting and achieves promising results. The simulations were done using Tensorflow Federated on data from 200 houses in Texas, USA.
10
+
11
+ Keywords—Federated Learning; Energy Load Forecasting; Edge Computing; Deep Neural Networks; LSTM; Smart Grid.
12
+
13
+ # I. INTRODUCTION
14
+
15
+ Load forecasting is an essential part of the development of the smart grid. Long-term load forecasting is deemed necessary for infrastructure planning, while mid-term and short-term load forecasting are key tasks in system operations [1]. Day-to-day operational efficiency of electrical power delivery, in particular, requires an accurate prediction of short-term load profiles, which is based on collecting and analysing large volumes of high-resolution data from households. However, individual short-term load forecasting (STLF) has proven to be a challenging task because of profile volatility. In fact, the electrical load of a house is highly correlated with its residents' behaviour, which is stochastic and often hard to predict [2, 3].
16
+
17
+ Benchmarks for state-of-the-art methods [4, 5] have found that deep neural networks are a promising solution for the STLF problem at the household level, due to their ability to capture complex and non-linear patterns. Neural networks outperform other prediction methods such as Auto Regressive Integrated Moving Average (ARIMA) [6] and Support Vector Regression (SVR). Nevertheless, applying deep learning models alone will not lead to significant improvements, as models tend to suffer from overfitting [7]. An overfitted model is a model that has learned the details of the training data, including the noise, which affects its ability to generalize when applied to new data. To tackle this issue, it is recommended to increase the diversity and size of the used data by combining usage
18
+
19
+ ![](images/29a086304edd72e9defb41b45bb0159b3abbc7de81d9722785dd0a8714a16bf6.jpg)
20
+ Fig. 1: Iterative communications between clients and server in Federated Learning
21
+
22
+ records from different households. Typically, proposed frameworks [8, 9] assume that all data records are transferred from smart meters to a centralized computational infrastructure through broadband networks to train models. Nevertheless, this assumption raises privacy concerns, since load profiles reveal a lot of sensitive information, such as device usage and the household's occupancy. Sending such detailed data over networks exposes it to malicious interception and misuse.
23
+
24
+ To address privacy concerns while still increasing the volume and variety of data records, a new on-device solution was recently proposed by the Machine Learning community: Federated Learning (FL) [10]. Federated Learning is a decentralized machine learning scheme, where each device participates in training a central model without sending any data. As illustrated in Fig. 1, the server first initializes the model either arbitrarily or by using publicly available data. Then, the model is sent to a set of randomly selected devices (clients) for local training using their data. Each client sends the server an update of the model's weights, which are averaged and used to update the global model. This process is repeated until the global model stabilizes.
25
+
26
+ The main purpose of this paper is to evaluate the use of Edge computing, together with the Federated Learning approach in the STLF challenge for electricity in households. Edge computing refers to data processing at the edge of a network as opposed to cloud or remote server processing. We use Long-short Term Memory (LSTM) [11], a deep neural network for forecasting time series, which uses previous observations of the house's electrical load to predict future
27
+
28
+ ones. We study a group of houses that have similar properties (geographical location, type of building) over a short period of time, to avoid the impact of weather fluctuations and seasonality. Federated learning is performed on the houses' grid Edge equipment. Edge equipment is usually present at the end of the electrical distribution system as a smart interface between the customer and the electric power supply, be it a smart meter or more sophisticated equipment. Our contributions in this work can be summarized as follows: (1) We propose an enabling architecture for FL using Edge equipment in the smart grid; (2) We evaluate the potential gain of FL in terms of accuracy through simulations; and (3) We evaluate the potential network load gain through numerical results. To these contributions, we add the gain in privacy afforded by decentralization and Edge computing.
29
+
30
+ The remainder of this paper is structured as follows: Section II discusses related works focusing on load prediction and privacy. In Section III, we define the proposed approach and used methods. Section IV introduces the simulations and numerical results. Then in Section V we discuss the limitations and future work. Section VI concludes the paper.
31
+
32
+ # II. RELATED WORK
33
+
34
+ Many recent research works used deep neural networks, and particularly Long-short term memory (LSTM), to tackle the short-term load forecasting challenge. In fact, benchmarks have proved LSTM's potential compared to other methods [12, 13], yet the results do not match the desired level of accuracy in terms of Root Mean Square Error (RMSE) and Mean Absolute Percentage Error (MAPE). In order to improve forecasting accuracy, authors in [14] propose to use a sequence-to-sequence variant of LSTM, which gives better results for one-minute resolution data, but no significant improvement for the one-hour resolution compared to standard LSTM. Furthermore, other authors [4] consider the problem of finding the best LSTM network to be a hyperparameter tuning problem, and use the genetic algorithm to this end. They state that finding the best combination of window size and number of hidden neurons in each layer remains a probabilistic task.
35
+
36
+ Other works consider that the problem is not simply a neural network architecture problem, and that the generalization ability of data-driven forecasting models is the real issue. In fact, the accuracy of many proposed models drops when they are applied to new datasets [5]. Some works suggest using complementary data about the weather [15] or records from the appliances [2]. While the weather has a real impact on aggregated electrical consumption, individual short-term load is more related to the occupants' behaviour [3, 16, 17]. However, collecting data from appliances around each house is an expensive and privacy-intrusive task.
37
+
38
+ Another approach to enrich the training data is grouping data from several customers. Authors in [8] use clustering to group users with similar profiles, hence reducing the variance of uncertainty within groups. Authors in [9] propose a pooling technique that increases data diversity to overcome the
39
+
40
+ ![](images/fd1f7e10da0f2dfb70fab787c457c2868322543764f7bb8b2084c83aa892f018.jpg)
41
+ Fig. 2: Network components and roles
42
+
43
+ overfitting problem. Nonetheless, these methods are heavily centralized and are prone to privacy-issues.
44
+
45
+ Fine-grained consumption data sent over networks is subject to many privacy threats when leaked through unauthorized interception or eavesdropping [18]. Many efforts have been made to protect users' identities in the smart grid. For instance, authors in [19] propose a clustering-based method where each group of users who are geographically close receive a common serial number. However, this method makes it hard to treat each client individually because of the anonymity. Other works focus on masking the consumption data, with data aggregation being the most popular method [20, 21], but this runs counter to STLF requirements.
46
+
47
+ None of the aforementioned papers addresses both user privacy and prediction accuracy. In the proposed work, we suggest using the Edge equipment that composes the Home Area Network (HAN) to carry out client selection and neural network training at the Edge following the federated learning scheme, allowing data to be used to train a global model without compromising the residents' privacy.
48
+
49
+ # III. SYSTEM MODEL
50
+
51
+ We propose the network architecture shown in Fig. 2, with two main components: a Multi-access Edge Computing (MEC) server [22] and clients. Clients are houses with Edge equipment, which is essentially composed of smart meters and other devices in the HAN. FL is used to build a global LSTM-based model for STLF. The training rounds are orchestrated by the MEC server and executed by the clients using their own electrical consumption data. In this section, we explain LSTM in detail and how it is used for forecasting, as well as FL and how it is used in our system model.
52
+
53
+ # A. Time series forecasting using LSTM
54
+
55
+ The prediction of the future electrical load in this work is achieved through the time series forecasting approach with LSTM. A time series refers to an ordered sequence of equally-spaced data points that represent the evolution of a specific variable over time. Time series forecasting is enabled through modeling the dependencies between current data points and historical data, but the accuracy of the predictions
56
+
57
+ relies heavily on the chosen model and the quality of historical data points.
58
+
59
+ LSTM is a recurrent neural network (RNN) that is fundamentally different from traditional feedforward neural networks, and more efficient than standard RNNs. Sequence learning is LSTM's forte. It is able to establish the temporal correlations between previous data points and the current circumstances, while solving the vanishing and exploding gradient problems that are common in RNNs. Gradient vanishing means that the norm of the gradient for long-term components gets smaller, causing weights at lower layers to stop changing, while gradient exploding refers to the opposite event [11]. This is achieved through its key components: the memory cell that is used to remember important states in the past, and the gates that regulate the flow of information. LSTM has three gates: the input gate, the output gate and the forget gate. They learn to reset the memory cell for unimportant features during the learning process. Almost all state-of-the-art results in sequence learning are achieved with LSTM and its variants, especially in language translation and speech recognition. In the case of residential STLF, it is expected that the LSTM network will be able to form an abstraction of the residents' states from the provided consumption profile, maintain the memory of these states, and forecast future consumption based on the learnt information.
60
+
61
+ # B. Federated Learning
62
+
63
+ Federated learning is a form of machine learning where most of the training process is done in a distributed way among devices referred to as clients. It was first proposed and implemented by Google on mobile device keyboards for next word prediction [23]. This approach is ideal in many cases: 1) when data is privacy-sensitive, 2) when data is large in size compared to model updates, 3) in highly distributed systems where the number of devices is orders of magnitude larger than the number of nodes in a data center, and 4) in supervised training when labels can be inferred directly from the user. Federated learning has also proven to be very useful when datasets are unbalanced or non-identically distributed.
64
+
65
+ An iteration of federated learning goes as follows: First, a subset of clients is chosen and each of them receives the current model. In our case, clients are hosted at Edge equipment in houses (e.g. smart meters). Clients that were selected compute Stochastic Gradient Descent (SGD) updates on locally-stored data, then a server aggregates the client updates to build a new global model. The new model is sent back to another subset of clients. This process is repeated until the desired prediction accuracy is reached. The operations are detailed in Algorithm 1.
66
+
67
+ In order to combine the client updates, the server uses the FederatedAveraging algorithm [10]. First, the initial global model is initialized randomly or is pre-trained using publicly available data. In each training round $r$ , the server sends a global model $w_{r}$ to a subset $K$ of clients who have enough data records and whose consumption load varies enough to enrich the training data. This condition was added to ensure
68
+
69
+ that we have enough variation in terms of data points to give a representation of the occupants' regular consumption. Afterward, every client $k$ in the subset uses $n_k$ examples from its local data. In our case, the volume is related to how long the smart meter has been generating data and how much of it is saved locally. The used dataset is composed of sliding windows with a predetermined number of look-back steps. SGD is then used by each client $k$ to compute the average gradient $g_k$ , with a learning rate $\eta$ . The updated models $w_k$ are sent to the server to be aggregated.
70
+
71
+ Algorithm 1 Federated Averaging Algorithm. $r_{max}$ is the maximum number of rounds. $\eta$ is the learning rate and $N = \sum_{k} n_{k}$
+ 1: initialize the model in training round $r = 0$
+ 2: while $r < r_{max}$ do
+ 3: Select subset $K$ of clients;
+ 4: for client $k$ in $K$ do
+ 5: if $\sigma(\text{monthly load}) > \text{threshold}$ then
+ 6: $k$ receives model $w_{r}$ ;
+ 7: $k$ computes average gradient $g_{k}$ with SGD;
+ 8: $k$ updates local model $w_{r+1}^{k} \gets w_{r}^{k} - \eta g_{k}$ ;
+ 9: $k$ sends updated model to server;
+ 10: end if
+ 11: end for
+ 12: server computes new global model using the equation $w_{r+1} \gets \sum_{k=0}^{K} \frac{n_{k}}{N} w_{r+1}^{k}$ ;
+ 13: start next round $r \gets r + 1$ ;
+ 14: end while
92
+
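+ As a minimal sketch, the client update and server aggregation of Algorithm 1 can be written with plain NumPy arrays standing in for the model weights; the function names below are illustrative and are not part of the Tensorflow Federated API, and client selection and the variance-based eligibility check are assumed to happen upstream.
+
+ ```python
+ # Sketch of one FederatedAveraging round: a model is a list of
+ # NumPy weight arrays, one entry per layer.
+ import numpy as np
+
+ def local_update(weights, gradients, lr=0.01):
+     # Client-side SGD step: w_{r+1}^k <- w_r^k - eta * g_k.
+     return [w - lr * g for w, g in zip(weights, gradients)]
+
+ def aggregate(client_models, client_sizes):
+     # Server-side step: w_{r+1} = sum_k (n_k / N) * w_{r+1}^k,
+     # averaged layer by layer and weighted by local dataset sizes.
+     total = float(sum(client_sizes))
+     return [sum((n / total) * w for n, w in zip(client_sizes, layer))
+             for layer in zip(*client_models)]
+ ```
+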
93
+ However, the centralized model may not fit all the users' electrical consumption. A proposed solution to this problem is personalization. Personalization is the focus of many applications that require understanding user behaviour and adapting to it. It consists of retraining the centralized model using user-specific data to build a personalized model for each user. This can be achieved by retraining the model locally for a small number of epochs using exclusively the user's data [24].
94
+
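+ As a sketch, personalization amounts to a few lines of Keras, assuming a compiled global model and the client's local training windows; the 5-epoch default mirrors the experiments reported in Section IV.
+
+ ```python
+ # Sketch of personalization: copy the global model, then fine-tune
+ # it for a few epochs on a single client's data only.
+ import tensorflow as tf
+
+ def personalize(global_model, x_local, y_local, epochs=5):
+     model = tf.keras.models.clone_model(global_model)
+     model.set_weights(global_model.get_weights())
+     model.compile(loss="mean_squared_error", optimizer="adam")
+     model.fit(x_local, y_local, epochs=epochs, verbose=0)
+     return model
+ ```
+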
95
+ Federated learning has fewer privacy risks than centralized server storage, since even when data are anonymized, the users' identities are still at risk and can be discovered through reverse engineering. The model updates sent by each client are ephemeral and never stored on the server; weight updates are processed in memory and are discarded after aggregation. The federated learning procedure requires that the individual weight uploads not be inspected or analyzed. This is still more secure than server training, because the network and the server cannot be entrusted with fine-grained user data. Some
96
+
97
+ data still have to be sent in an aggregated form for billing, but these data do not reveal many details. Techniques such as secure aggregation [25] and differential privacy[26] are being explored to enforce trust requirements.
98
+
99
+ # C. Networking Load Gain
100
+
101
+ To evaluate the gain in network load of FL in contrast to centralized training, we first define the network load $L_{sC}$ for a server $s$ in centralized training in Eq. 1 and the network load in FL, $L_{sF}$ , in Eq. 2.
102
+
103
+ $S_{k-d}$ is the size of the data sent by client $k$ and $S_{m}$ is the size of the model. In the centralized training, $d_{k}$ is the number of hops between client $k$ and the server.
104
+
105
+ $$
106
+ L_{sC} = \sum_{k=1}^{N} S_{k-d} \times d_{k} \tag{1}
107
+ $$
108
+
109
+ $$
110
+ L_{sF} = S_{m} \times \sum_{r=1}^{r_{\max}} \sum_{k=1}^{K} d_{k,r} \tag{2}
111
+ $$
112
+
113
+ where $d_{k,r}$ is the number of hops between the client $k$ selected in round $r$ and the server, and $K$ is the number of users in each subset.
114
+
115
+ Using Eq.1 and Eq.2, we define the gain in networking load as follows:
116
+
117
+ $$
118
+ G_{s} = 1 - L_{sF} / L_{sC} \tag{3}
119
+ $$
120
+
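+ For illustration, Eqs. 1-3 translate directly into code; this is a sketch in which sizes and hop counts only need to be expressed in consistent units.
+
+ ```python
+ # Network load of centralized training (Eq. 1), of federated
+ # learning (Eq. 2), and the resulting gain G_s (Eq. 3).
+ def centralized_load(data_sizes, hops):
+     # Each client k ships its raw data over d_k hops.
+     return sum(s * d for s, d in zip(data_sizes, hops))
+
+ def federated_load(model_size, hops_per_round):
+     # The model of size S_m travels d_{k,r} hops for every client k
+     # selected in every round r.
+     return model_size * sum(sum(r) for r in hops_per_round)
+
+ def gain(model_size, hops_per_round, data_sizes, hops):
+     return 1.0 - (federated_load(model_size, hops_per_round)
+                   / centralized_load(data_sizes, hops))
+ ```
+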
121
+ # IV. SIMULATION AND RESULTS
122
+
123
+ # A. Dataset Pre-Processing and Evaluation Method
124
+
125
+ This research was conducted using data from the Pecan Street Inc. Dataport site. Dataport contains unique, circuit-level electricity use data at one-minute to one-second intervals for approximately 800 homes in the United States, with photovoltaic generation and electric vehicle charging data for a subset of these homes [27]. We chose a subset of 200 clients with similar properties from this dataset. It is composed of the same kind of houses (detached family homes), located in the same area (Texas). The dataset is composed of records between January 1st 2019 and March 31st 2019 with one-hour resolution. The weather fluctuations in this period are low, so the seasonal factor can be ignored in this study. The data of each client is prepared to be ready for further analysis. First, we transform the data to a scale between 0 and 1. Then we transform the time series into sliding windows with look-backs of size 12 and a look-ahead of size 1. Finally, we split the data into train and test subsets (90% for training and 10% for testing). We also split the clients into two groups: 180 participating in the federated learning process, and 20 left to evaluate how well the model fits non-participating clients.
126
+
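+ A minimal sketch of this per-client preparation, assuming a one-dimensional NumPy array of hourly readings:
+
+ ```python
+ # Min-max scaling to [0, 1], sliding windows with 12 look-back steps
+ # and a look-ahead of 1, then a chronological 90/10 train/test split.
+ import numpy as np
+
+ def prepare(series, look_back=12, train_ratio=0.9):
+     scaled = (series - series.min()) / (series.max() - series.min())
+     x = np.stack([scaled[i:i + look_back]
+                   for i in range(len(scaled) - look_back)])
+     x = x[..., np.newaxis]   # one feature (load) per time step
+     y = scaled[look_back:]   # the value one step ahead of each window
+     split = int(train_ratio * len(x))
+     return (x[:split], y[:split]), (x[split:], y[split:])
+ ```
+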
127
+ We use RMSE and MAPE to evaluate the model's performance with regard to the prediction error. RMSE allows us to quantify the error in terms of energy, while MAPE is a
128
+
129
+ percentage quantifying the size of the error relative to the real value. The expressions of RMSE and MAPE are as follows:
130
+
131
+ $$
132
+ RMSE = \sqrt{\frac{\sum_{i=1}^{P}\left(y_{i} - \hat{y}_{i}\right)^{2}}{P}} \tag{4}
133
+ $$
134
+
135
+ $$
136
+ MAPE = \frac{100\%}{P} \sum_{i=1}^{P} \left| \frac{y_{i} - \hat{y}_{i}}{y_{i}} \right| \tag{5}
137
+ $$
138
+
139
+ where $\hat{y}_i$ is the predicted value, $y_i$ is the actual value and $P$ is the number of predicted values.
140
+
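+ In NumPy form, the two metrics read as follows; this is a sketch in which zero actual values are discarded for MAPE, as done when reporting the results in Section IV-C.
+
+ ```python
+ # RMSE (Eq. 4) and MAPE (Eq. 5) over the P predicted values.
+ import numpy as np
+
+ def rmse(y_true, y_pred):
+     return np.sqrt(np.mean((y_true - y_pred) ** 2))
+
+ def mape(y_true, y_pred):
+     keep = y_true != 0  # discard null consumption values
+     return 100.0 * np.mean(np.abs((y_true[keep] - y_pred[keep])
+                                   / y_true[keep]))
+ ```
+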
141
+ # B. Simulation setup
142
+
143
+ The simulations were conducted on a laptop with a 2.2 GHz Intel i7 processor, 16 GB of memory and an NVIDIA GeForce GTX 1070 graphics card. We used Tensorflow Federated 0.4.0 with a Tensorflow 1.13.1 backend.
144
+
145
+ Hyper-parameter tuning in deep learning models is important to obtain the best forecasting performance. However, in this work, we only focus on evaluating the federated learning paradigm. Previous work shows performance insensitivity to combinations of some layers and layer sizes, as long as multiple layers are used and the number of hidden nodes is sufficiently large [28]. It was also suggested that very deep networks are prone to under-fitting and vanishing gradients. Following these rules, the initial model hyper-parameters (e.g. number of layers, and time steps to be considered) were chosen by random search on a randomly selected client's data. The retained model has two LSTM hidden layers composed of 200 neurons each. The loss function used is mean squared error and the chosen optimiser is Adam. The model converges around the 20th epoch, and thus we use comparable values for the numbers of rounds and epochs.
146
+
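+ A Keras sketch of the retained architecture; the single-unit output head and the input shape are implied by the forecasting task rather than stated explicitly in the text.
+
+ ```python
+ # Two stacked LSTM hidden layers of 200 neurons each, mean squared
+ # error loss and the Adam optimiser, as described above.
+ import tensorflow as tf
+
+ def build_model(look_back=12):
+     model = tf.keras.Sequential([
+         tf.keras.layers.LSTM(200, return_sequences=True,
+                              input_shape=(look_back, 1)),
+         tf.keras.layers.LSTM(200),
+         tf.keras.layers.Dense(1),  # next-hour load (assumed head)
+     ])
+     model.compile(loss="mean_squared_error", optimizer="adam")
+     return model
+ ```
+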
147
+ # C. Numerical Results
148
+
149
+ # 1) Evaluated scenarios:
150
+
151
+ The different scenarios that were evaluated are summarized in Table I. As explained in the previous section, in each round, only a subset of clients train the model. We modify the number of clients in the subset selected in each round, to see the effect of larger subsets. We also vary the number of epochs of local training. In all the scenarios, the federated learning algorithm was run for 20 rounds.
152
+
153
+ TABLE I: Used scenarios
154
+
155
+ <table><tr><td>Scenarios</td><td>Clients in subset</td><td>Local Epochs</td></tr><tr><td>1</td><td>5</td><td>1</td></tr><tr><td>2</td><td>20</td><td>1</td></tr><tr><td>3</td><td>5</td><td>5</td></tr><tr><td>4</td><td>20</td><td>5</td></tr></table>
156
+
157
+ # 2) Results for global models:
158
+
159
+ Each evaluated scenario resulted in a global model obtained following the federated learning approach. These models are evaluated in terms of RMSE and MAPE as shown in Tables II and III. Null consumption values have been
160
+
161
+ discarded when calculating MAPE. Table II summarizes the results for the participating clients in the different scenarios. In our case, the load forecast is on a granular level (single house) and on a short term (1 hour), therefore the MAPE values achieved in Table II for the various models are reasonable, and this level of accuracy is anticipated, as similar values have been reported by previous works [28, 29]. These works also report that the forecasting accuracy tends to be low for short-term forecasting horizons. One of the most notable observations is that the global model fits some clients better than others, given that not all clients have similar profiles. We also notice that selecting a larger number of clients in each round is preferable, but in cases where sending updates is more expensive in terms of networking, the difference can be compensated by using more local training epochs. The results are similar when applied to the set of clients who did not participate in the training.
162
+
163
+ TABLE II: Resulting RMSE and MAPE for global models in the considered scenarios for the 180 participating clients
164
+
165
+ <table><tr><td></td><td colspan="3">RMSE</td><td colspan="3">MAPE</td></tr><tr><td>Scenario</td><td>Min</td><td>Max</td><td>Mean</td><td>Min</td><td>Max</td><td>Mean</td></tr><tr><td>1</td><td>0.070</td><td>2.652</td><td>0.605</td><td>10.65%</td><td>83.35%</td><td>41.40%</td></tr><tr><td>2</td><td>0.045</td><td>2.55</td><td>0.578</td><td>9.18%</td><td>87.63%</td><td>38.39%</td></tr><tr><td>3</td><td>0.026</td><td>2.652</td><td>0.576</td><td>9.45%</td><td>96.84%</td><td>37.43%</td></tr><tr><td>4</td><td>0.047</td><td>2.68</td><td>0.583</td><td>9.71%</td><td>93.74%</td><td>38.91%</td></tr></table>
166
+
167
+ TABLE III: Resulting RMSE and MAPE for global models in the considered scenarios for the 20 non-participant clients
168
+
169
+ <table><tr><td></td><td colspan="3">RMSE</td><td colspan="3">MAPE</td></tr><tr><td>Scenario</td><td>Min</td><td>Max</td><td>Mean</td><td>Min</td><td>Max</td><td>Mean</td></tr><tr><td>1</td><td>0.262</td><td>1.024</td><td>0.589</td><td>15.82%</td><td>60.72%</td><td>44.98%</td></tr><tr><td>2</td><td>0.241</td><td>0.979</td><td>0.550</td><td>16.08%</td><td>55.34%</td><td>40.95%</td></tr><tr><td>3</td><td>0.229</td><td>0.99</td><td>0.530</td><td>15.78%</td><td>53.98%</td><td>39.18%</td></tr><tr><td>4</td><td>0.235</td><td>1.004</td><td>0.543</td><td>16.04%</td><td>56.61%</td><td>41.15%</td></tr></table>
170
+
171
+ # 3) Behaviour of personalization:
172
+
173
+ In this section, we study the effect of personalization on the performance of the models. First, we test whether retraining the model locally for the participant clients gives better results. Then we apply the same procedure to the set of clients who did not participate in the training. The models were retrained for 5 epochs for each client. Results for the set of clients participating in the training are summarized in Table IV, and for the non-participating clients in Table V. We notice an overall improvement in most of the models. For example, model 1 shows an overall improvement of $5.07\%$ in terms of MAPE for the participating set of clients and of $4.78\%$ for the non-participating set. However, for some clients, the performance cannot be improved despite retraining, and this, as mentioned earlier, is related to the quality of the historical data points. Applying the models to these clients' consumption profiles results in very high MAPE, which affects the average results. These clients should be treated as outliers; nonetheless, this is beyond the scope of this study.
174
+
175
+ To illustrate the improvements in predictions brought by personalization, we randomly selected a client from the participant
176
+
177
+ TABLE IV: Resulting RMSE and MAPE after personalization over 180 clients
178
+
179
+ <table><tr><td></td><td colspan="3">RMSE</td><td colspan="3">MAPE</td></tr><tr><td>Scenario</td><td>Min</td><td>Max</td><td>Mean</td><td>Min</td><td>Max</td><td>Mean</td></tr><tr><td>1</td><td>0.0</td><td>2.47</td><td>0.550</td><td>8.13%</td><td>99.16%</td><td>36.33%</td></tr><tr><td>2</td><td>0.0</td><td>2.47</td><td>0.551</td><td>7.89%</td><td>91.23%</td><td>36.39%</td></tr><tr><td>3</td><td>0.0</td><td>2.371</td><td>0.536</td><td>7.64%</td><td>88.76%</td><td>34.27%</td></tr><tr><td>4</td><td>0.0</td><td>2.375</td><td>0.536</td><td>8.00%</td><td>82.14%</td><td>34.14%</td></tr></table>
180
+
181
+ TABLE V: Resulting RMSE and MAPE after personalization for 20 non-participating clients
182
+
183
+ <table><tr><td></td><td colspan="3">RMSE</td><td colspan="3">MAPE</td></tr><tr><td>Scenario</td><td>Min</td><td>Max</td><td>Mean</td><td>Min</td><td>Max</td><td>Mean</td></tr><tr><td>1</td><td>0.232</td><td>0.905</td><td>0.516</td><td>18.35%</td><td>53.70%</td><td>40.20%</td></tr><tr><td>2</td><td>0.233</td><td>0.901</td><td>0.516</td><td>16.99%</td><td>58.68%</td><td>40.71%</td></tr><tr><td>3</td><td>0.235</td><td>0.909</td><td>0.516</td><td>15.79%</td><td>54.82%</td><td>39.49%</td></tr><tr><td>4</td><td>0.232</td><td>0.907</td><td>0.509</td><td>15.96%</td><td>52.96%</td><td>39.01%</td></tr></table>
184
+
185
+ set (client 4313) and a client from the non-participant set (client 8467). We applied global model 4 and the corresponding personalized models. The actual load profiles and the predicted profiles are shown in Fig. 3 and Fig. 4. Both models fit the overall behaviour of the consumption profiles.
186
+
187
+ We conclude that we can indeed train powerful models for a population's consumption profiles using only a subset of its users. For applications that have high accuracy requirements, the model can be retrained, resulting in a personalized model that follows the profile's curves better and yields more accurate predictions. Nonetheless, the predictions obtained with the global model can be a good starting point for new clients who do not have enough data for personalization.
188
+
189
+ # 4) Gain in network load:
190
+
191
+ To illustrate the gain in network load, we can consider the most basic case where the distance between all the clients and the MEC server is one hop. The size of the model is $1.9\mathrm{Kb}$ and the size of the used data is $16\mathrm{Mb}$. Using Eq. 3, the gain in scenarios 1 and 3 is $97\%$, while scenarios 2 and 4 result in a gain of $90\%$. This is a significant gain, especially considering that the approach could be applied at the scale of a city or bigger, for example.
192
+
193
+ # V. REMARKS & FUTURE WORK
194
+
195
+ The feasibility of the proposed approach is dependent on the capabilities of the edge devices to perform local training. New IoT devices have sufficient computing hardware to run complex machine learning models, but training a neural network is very likely to compromise device performance. However, some lightweight machine learning frameworks have emerged such as Tensorflow Lite<sup>1</sup> which provides solid ground for future implementations.
196
+
197
+ The accuracy of the models, even after personalization, still varies depending on the user. To improve the results, neural networks should be coupled with other methods, such as a prior clustering of clients using criteria other than the
198
+
199
+ ![](images/09b144c5c1759917b510347fc989b41178d1e6bebbcc02d1108a6fc55c0e3192.jpg)
200
+ Fig. 3: Predictions for next hour consumption for client 4313 who participated in training the global model 4. Local training for 5 epochs reduced RMSE from $0.55\mathrm{kW}$ to $0.388\mathrm{kW}$ .
201
+
202
+ ![](images/d9650ebbb10eabb5c26a8289797771035f397b0aed78f13b1176f493cc0971ee.jpg)
203
+ Fig. 4: Predictions for next hour consumption for client 8467 who did not participate in training the global model 4. Local training for 5 epochs reduced RMSE from $0.8\mathrm{kW}$ to $0.72\mathrm{kW}$ .
204
+
205
+ geographical proximity. Solving the problem of outliers in this context should also be investigated.
206
+
207
+ # VI. CONCLUSION
208
+
209
+ Individual short-term load forecasting is a challenging task considering the stochastic nature of consumption profiles. In this paper, we proposed a system model using Edge computing and federated learning to tackle the privacy and data diversity challenges related to short-term load forecasting in the smart grid. To the best of our knowledge, this represents one of the first studies of federated learning in the smart grid context. Unlike centralized methods, the proposed system uses federated learning on edge devices to train models, hence reducing security risks to only those related to the device. We conducted experiments to evaluate the performance of both global and personalized models in federated settings. The simulation results show that this is a promising approach for creating high-performing models with a significantly reduced networking load compared to a centralized model, while preserving the privacy of consumption data.
210
+
211
+ # ACKNOWLEDGEMENT
212
+
213
+ The authors would like to thank the Natural Sciences and Engineering Research Council of Canada for the financial support of this research.
214
+
215
+ # REFERENCES
216
+
217
+ [1] Elena Mocanu, Phuong H. Nguyen, Madeleine Gibescu, and Wil L. Kling. Deep learning for estimating building energy consumption. Sustainable Energy, Grids and Networks, 6:91-99, June 2016.
218
+ [2] W. Kong, Z. Y. Dong, D. J. Hill, F. Luo, and Y. Xu. Short-Term Residential Load Forecasting Based on Resident Behaviour Learning. IEEE Transactions on Power Systems, 33(1):1087-1088, January 2018.
219
+ [3] Dhaou Said et al. Advanced scheduling protocol for electric vehicle home charging with time-of-use pricing. In 2013 IEEE International Conference on Communications (ICC), pages 6272-6276, June 2013. ISSN: 1938-1883.
220
+ [4] A. Almalaq and J. J. Zhang. Evolutionary Deep Learning-Based Energy Consumption Prediction for Buildings. IEEE Access, 7:1520-1531, 2019.
221
+ [5] Salah Bouktif, Ali Fiaz, Ali Ouni, and Mohamed Adel Serhani. Optimal Deep Learning LSTM Model for Electric Load Forecasting using Feature Selection and Genetic Algorithm: Comparison with Machine Learning Approaches †. Energies, 11(7):1636, July 2018.
222
+ [6] Abderrahime Filali et al. Prediction-Based Switch Migration Scheduling for SDN Load Balancing. In ICC 2019 - 2019 IEEE International Conference on Communications (ICC), pages 1-6, May 2019. ISSN: 1938-1883.
223
+ [7] Yanbo Huang. Advances in Artificial Neural Networks - Methodological Development and Application. Algorithms, 2(3):973-1007, September 2009.
224
+ [8] B. Stephen, X. Tang, P. R. Harvey, S. Galloway, and K. I. Jennett. Incorporating Practice Theory in Sub-Profile Models for Short Term Aggregated Residential Load Forecasting. IEEE Transactions on Smart Grid, 8(4):1591-1598, July 2017.
225
+ [9] H. Shi, M. Xu, and R. Li. Deep Learning for Household Load Forecasting—A Novel Pooling Deep RNN. IEEE Transactions on Smart Grid, 9(5):5271-5280, September 2018.
226
+ [10] H. Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. Communication-efficient learning of deep networks from decentralized data. In Proceedings of the 20th International Conference on Artificial Intelligence and Statistics (AISTATS), 2017.
227
+ [11] S. Hochreiter and J. Schmidhuber. Long short-term memory. Neural Computation, 9(8):1735-1780, November 1997.
228
+ [12] Salah Bouktif, Ali Fiaz, Ali Ouni, and Mohamed Adel Serhani. Optimal Deep Learning LSTM Model for Electric Load Forecasting using Feature Selection and Genetic Algorithm: Comparison with Machine Learning Approaches †. Energies, 11(7):1636, July 2018.
229
+ [13] Jian Zheng, Cencen Xu, Ziang Zhang, and Xiaohua Li. Electric load forecasting in smart grids using Long-Short-Term-Memory based Recurrent Neural Network. In 2017 51st Annual Conference on Information Sciences and Systems (CISS), pages 1-6, March 2017.
230
+ [14] D. L. Marino, K. Amarasinghe, and M. Manic. Building energy load forecasting using Deep Neural Networks. In IECON 2016 - 42nd Annual Conference of the IEEE Industrial Electronics Society, pages 7046-7051, October 2016.
231
+ [15] Guangya Zhu, Tin-Tai Chow, and Norman Tse. Short-term load forecasting coupled with weather profile generation methodology. Building Services Engineering Research and Technology, 39(3):310-327, May 2018.
232
+ [16] Dhaou Said et al. Scheduling protocol with load management for EV charging. In 2014 IEEE Global Communications Conference, pages 362-367, December 2014. ISSN: 1930-529X.
233
+ [17] Jihene Rezgui et al. Smart charge scheduling for EVs based on two-way communication. In 2017 IEEE International Conference on Communications (ICC), pages 1-6, May 2017. ISSN: 1938-1883.
234
+ [18] P. Kumar, Y. Lin, G. Bai, A. Paverd, J. S. Dong, and A. Martin. Smart Grid Metering Networks: A Survey on Security, Privacy and Open Research Issues. IEEE Communications Surveys Tutorials, pages 1-1, 2019.
235
+ [19] M. Badra and S. Zeadally. Design and Performance Analysis of a Virtual Ring Architecture for Smart Grid Privacy. IEEE Transactions on Information Forensics and Security, 9(2):321-329, February 2014.
236
+
237
+ [20] Y. Gong, Y. Cai, Y. Guo, and Y. Fang. A Privacy-Preserving Scheme for Incentive-Based Demand Response in the Smart Grid. IEEE Transactions on Smart Grid, 7(3):1304-1313, May 2016.
238
+ [21] H. Park, H. Kim, K. Chun, J. Lee, S. Lim, and I. Yie. Untraceability of Group Signature Schemes based on Bilinear Mapping and Their Improvement. In Fourth International Conference on Information Technology (ITNG'07), pages 747-753, April 2007.
239
+ [22] Quoc-Viet Pham, Fang Fang, Vu Nguyen Ha, Mai Le, Zhiguo Ding, Long Bao Le, and Won-Joo Hwang. A Survey of Multi-Access Edge Computing in 5g and Beyond: Fundamentals, Technology Integration, and State-of-the-Art. arXiv:1906.08452 [cs, math], June 2019. arXiv: 1906.08452.
240
+ [23] Andrew Hard, Kanishka Rao, Rajiv Mathews, Swaroop Ramaswamy, Françoise Beaufays, Sean Augenstein, Hubert Eichner, Chloe Kiddon, and Daniel Ramage. Federated learning for mobile keyboard prediction. arXiv:1811.03604 [cs], November 2018. arXiv: 1811.03604.
241
+ [24] Khe Chai Sim, Petr Zadrazil, and Françoise Beaufays. An Investigation into On-Device Personalization of End-to-End Automatic Speech Recognition Models. In Interspeech 2019, pages 774-778. ISCA, September 2019.
242
+ [25] Keith Bonawitz, Vladimir Ivanov, Ben Kreuter, Antonio Marcedone, H. Brendan McMahan, Sarvar Patel, Daniel Ramage, Aaron Segal, and Karn Seth. Practical Secure Aggregation for Federated Learning on User-Held Data. arXiv:1611.04482 [cs, stat], November 2016. arXiv: 1611.04482.
243
+ [26] Robin C. Geyer, Tassilo Klein, and Moin Nabi. Differentially Private Federated Learning: A Client Level Perspective. arXiv:1712.07557 [cs, stat], December 2017. arXiv: 1712.07557.
244
+ [27] Pecan Street Inc. Dataport, 2019. [Online]. Available: https://dataport.pecanstreet.org/.
245
+ [28] W. Kong, Z. Y. Dong, Y. Jia, D. J. Hill, Y. Xu, and Y. Zhang. Short-Term Residential Load Forecasting Based on LSTM Recurrent Neural Network. IEEE Transactions on Smart Grid, 10(1):841-851, January 2019.
246
+ [29] Matthew Rowe, Timur Yunusov, Stephen Haben, William Holderbaum, and Ben Potter. The Real-Time Optimisation of DNO Owned Storage Devices on the LV Network for Peak Reduction. Energies, 7(6):3537-3560, June 2014.
2201.11xxx/2201.11248/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6295230245d254487942106781774276677cce91488a43d27b6f7aab4a20ed1
3
+ size 261566
2201.11xxx/2201.11248/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11271/91a22180-c8a5-4c9c-8d6e-dc5ac7e21a17_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11271/91a22180-c8a5-4c9c-8d6e-dc5ac7e21a17_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11271/91a22180-c8a5-4c9c-8d6e-dc5ac7e21a17_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8227da2e2d7606e4d03c55e221304ea8defd73c2774f3e7b59b94e97dc47426c
3
+ size 1872487
2201.11xxx/2201.11271/full.md ADDED
@@ -0,0 +1,488 @@
1
+ # Clustered Vehicular Federated Learning: Process and Optimization
2
+
3
+ Afaf Taik Student Member, IEEE, Zoubeir Mlika Member, IEEE and Soumaya Cherkaoui Senior Member, IEEE
4
+
5
+ Abstract—Federated Learning (FL) is expected to play a prominent role in privacy-preserving machine learning (ML) in autonomous vehicles. FL involves the collaborative training of a single ML model among edge devices on their distributed datasets while keeping the data local. While FL requires less communication compared to classical distributed learning, it remains hard to scale for large models. In vehicular networks, FL must be adapted to the limited communication resources, the mobility of the edge nodes, and the statistical heterogeneity of data distributions. Indeed, a judicious utilization of the communication resources alongside new perceptive learning-oriented methods is vital. To this end, we propose a new architecture for vehicular FL and corresponding learning and scheduling processes. The architecture utilizes vehicle-to-vehicle (V2V) resources to bypass the communication bottleneck: clusters of vehicles train models simultaneously and only the aggregate of each cluster is sent to the multi-access edge computing (MEC) server. The cluster formation is adapted for single- and multi-task learning, and takes into account both communication and learning aspects. We show through simulations that the proposed process is capable of improving the learning accuracy in several non-independent and identically distributed (non-i.i.d) and unbalanced dataset distributions, under mobility constraints, in comparison to standard FL.
6
+
7
+ Keywords—Autonomous Driving; Clustering; Federated Learning; Privacy; Vehicular Communication.
8
+
9
+ # I. INTRODUCTION
10
+
11
+ Autonomous driving (AD) requires little to no human interaction to build an intelligent transportation system (ITS). Consequently, AD helps in reducing accidents caused by human driving errors. Artificial intelligence (AI) plays an essential role in AD by empowering several applications, such as object detection and tracking, through machine learning (ML) techniques [1, 2].
12
+
13
+ With the rise of AI research and deployment over the last decade, the development of autonomous vehicles has seen significant advancements. Indeed, vehicle manufacturers have put considerable effort into deploying AI schemes that aim to achieve human-level situational awareness. However, owing to technical difficulties and several ethical and legal challenges, full autonomy remains hard to achieve. In fact, autonomous vehicles need to fulfill strict requirements of reliability and efficiency, and achieve high levels of situational awareness, and vehicle manufacturers are working toward these goals. Autonomous vehicles will be capable of sensing their network environment using embedded sensors and sharing information with other vehicles and equipment through wireless communication. Autonomous vehicles can be equipped with
14
+
15
+ Authors are with the INTERLAB Research Laboratory, Faculty of Engineering, Department of Electrical Engineering and Computer Engineering, Université de Sherbrooke, Sherbrooke (QC) J1K 2R1, Canada.
16
+
17
+ LiDAR sensors, camera sensors, and radar sensors that collect important amounts of data to share with the vehicular network.
18
+
19
+ With the prevalence of connected vehicles and the transition toward autonomy, it is expected that vehicles will no longer rely only on locally collected data for localization and operation. Instead, enhanced situational awareness can be attained through exchanging raw and processed sensor data among large networks of interconnected vehicles [3]. In contrast to status data sharing, sensor data sharing becomes a pivotal operation for different safety applications, such as HD map building [4] and extended perception [5]. These data are also necessary to produce or enhance ML models that will be capable of performing AD tasks, such as dynamically adjusting the vehicle's speed, braking, and steering, by observing their surrounding environment.
20
+
21
+ Nonetheless, extensive sensor data sharing raises alarming privacy issues, since it involves sharing raw and processed data among vehicles. These data expose sensitive information about the vehicle, the driver, and the passengers, and could be used in a harmful way by a malicious entity. While privacy in vehicle status sharing has already been extensively addressed and regulated by vehicle manufacturers, through dynamic changes of the media access control (MAC) address and data anonymization, these regulations have not been extended to sensor data sharing. Moreover, to attain fully autonomous driving and enhance the overall performance of the ML models, the ML/AI models deployed in the vehicle need to be updated and improved periodically by original equipment manufacturers (OEMs). This requires the vehicles to upload the collected data to the OEMs, which further violates data privacy. Indeed, when data is uploaded to multi-access edge computing (MEC) [6, 7] servers, or to the cloud, it may be subject to malicious interception and misuse.
22
+
23
+ Federated learning (FL) [8] has emerged as an attractive solution for privacy-preserving ML. FL consists of the collaborative training of ML models among edge devices without data sharing, which makes it a promising solution for the continuous improvement of ML models in AD. Indeed, with FL, edge devices share their model parameters instead of their private data, and the models are then aggregated at MEC servers to obtain an accurate global model.
24
+
25
+ When FL is used in a vehicular network context, a centralized entity (e.g., a MEC server) initializes a model and distributes it among participant vehicles. Each vehicle then trains the model using local data and sends the resulting model parameters to the central entity for aggregation.
26
+
27
+ The predominant FL training scheme is synchronous aggregation: the MEC server waits for all vehicles to send their updates before aggregating them.
30
+
31
+ The assumption of FL is that the goal of the participating end devices (also called end users throughout the article) is to approximate the same global function. Nevertheless, this is not the case for non-i.i.d data, particularly in the case of competing objectives, where a single joint model cannot be optimal for all end devices simultaneously. Consequently, clustering [9, 10] was proposed to group users with similar objectives and build multiple versions of the trained model. However, these works assume the availability of all the end users and require their participation in the training for cluster formation. Therefore, even though vehicle clustering for FL is appealing for the aforementioned reasons, high-speed mobility, the Doppler effect, and frequent handovers (short inter-connection times) mean that not all vehicle updates can be collected at the MEC servers. Further, due to the different mobility patterns, not all vehicles can have strong signal quality with the MEC servers. As a result, participating vehicles should be carefully selected and communication must be efficiently scheduled.
32
+
33
+ Vehicle-to-vehicle (V2V) communication offers a new opportunity for FL deployment that bypasses the communication bottleneck with the MEC server [11]. A cluster of vehicles can collaboratively train models, and a chosen cluster-head can aggregate their updates so that only one model is sent to the MEC server. To achieve this, two main questions need to be addressed: how to adequately form FL clusters under mobility constraints, and how to select the cluster-heads in such settings.
34
+
35
+ In this article, we propose a cluster-based scheme for FL in vehicular networks. The clustering scheme consists of grouping vehicles with common characteristics, not only in terms of direction and velocity, but also from a learning perspective, through the evaluation of the updates' similarity. Thus, the proposed scheme accelerates model training by ensuring (i) a larger number of participants and (ii) the possibility of training several models to adapt to non-i.i.d and unbalanced data distributions.
36
+
37
+ The main contributions of this article can be summarized as follows:
38
+
39
+ 1) we design an architecture and corresponding FL process for clustered FL in vehicular environments;
40
+ 2) we formulate a joint cluster-head selection and resource block allocation problem taking into account mobility and data properties;
41
+ 3) we formulate a matching problem for cluster formation taking into account mobility and model preferences;
42
+ 4) we prove that the cluster-head problem is NP-hard and we propose a greedy algorithm to solve it;
43
+ 5) we evaluate the proposed scheme through extensive simulations.
44
+
45
+ The remainder of this article is organized as follows: In Section II, we present the background for FL and related work. In Section III, we present the design of the learning process and considered system model components. In Section IV, we formulate the cluster-head selection and vehicle association problems, and we present the proposed solution. Simulation
46
+
47
+ TABLE I: List of Notations.
48
+
49
+ | Notation | Description |
+ | --- | --- |
+ | $T_k$ | Standing time (rate of stay) of vehicle $k$ |
+ | $Total_{RB}$ | Total available resource blocks |
+ | $s$ | Model size |
+ | $\epsilon$ | Number of local epochs |
+ | $g$ | Global model |
+ | $\theta_k$ | Model update of vehicle $k$ |
+ | $t_k^{train}$ | Training time of vehicle $k$ |
+ | $r_k$ | Achievable data rate of vehicle $k$ |
+ | $t_k^{up}$ | Upload time of vehicle $k$ |
+ | $P_k$ | Transmit power of vehicle $k$ |
+ | $N_0$ | Power spectral density of the Gaussian noise |
+ | $|D_k|$ | Size of vehicle $k$'s dataset |
+ | $I_k$ | Data diversity index of vehicle $k$ |
+ | $R_{k,h}$ | Relationship between vehicles $k$ and $h$ |
50
+
51
+ results are presented in Section V. At last, conclusions and future work are presented in Section VI.
52
+
53
+ # II. BACKGROUND
54
+
55
+ In this section, we first present a background on FL and challenges tackled in this paper, then we present related work that enables and motivates our work.
56
+
57
+ # A. Federated Learning
58
+
59
+ FL is a privacy-preserving distributed training framework, which consists of the collaborative training of a single ML model among different participants (e.g., IoT devices) on their local datasets. The training is an iterative process that starts with the global model initialization by a centralized entity (e.g., a server). In every communication round $t$, a selected subset of $N$ participants receives the latest global model $\theta_{t}$. Then, every participant $k$ trains the model by performing multiple iterations of stochastic gradient descent (SGD) on minibatches from its local dataset $D_{k}$. The local training results in a weight-update vector $\Delta \theta_{k}^{t + 1}$, which is sent to the server. The last step is the model aggregation at the server, which is typically achieved using weighted aggregation [8] following Eq. 1. The process is then repeated until the model converges.
60
+
61
+ $$
62
+ \theta_{t+1} = \theta_{t} + \sum_{k=1}^{N} \frac{\left|D_{k}\right|}{|D|} \Delta\theta_{k}^{t+1} \tag{1}
63
+ $$
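+ For concreteness, a minimal sketch of the aggregation in Eq. 1 follows, assuming each update is a dict mapping parameter names to weight-delta tensors; the function name and data layout are illustrative, not the authors' implementation.
+
+ ```python
+ def aggregate(global_state, updates, sizes):
+     """Apply Eq. 1: add the |D_k|/|D|-weighted sum of the deltas.
+
+     global_state: dict of parameter tensors (or floats);
+     updates: list of dicts of weight deltas, one per participant;
+     sizes: list of local dataset sizes |D_k|.
+     """
+     total = float(sum(sizes))
+     return {
+         name: param + sum((n / total) * u[name] for u, n in zip(updates, sizes))
+         for name, param in global_state.items()
+     }
+ ```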
64
+
65
+ While this aggregation method takes into account the unbalanced dataset sizes, it is not always suitable for non-i.i.d distributions. Furthermore, FL in wireless networks in general, and in vehicular networks in particular, is subject to the following challenges:
66
+
67
+ Statistical heterogeneity: One of the underlying challenges for training a single joint model in FL settings is the presence of non-i.i.d data. For instance, some nodes only have access to data from a subset of all possible labels for a given task, while other nodes may have access to different input features. Furthermore, varying preferences can lead to concept shift (i.e., nodes classify the same features under different labels, or vice-versa). In practice, these non-i.i.d settings are highly likely to be present in a given massively distributed
68
+
69
+ dataset. Thus, training models under these settings requires new considerations.
70
+
71
+ Partial Participation: Given the scarcity of communication resources, the number of participating nodes is limited. In fact, the generated traffic grows linearly with the number of participating nodes and the model size. Moreover, the heterogeneity of the nodes in terms of computational capabilities and mobility (i.e., velocity and direction) introduces stringent constraints on communication. Hence, enabling FL on the road in a communication-efficient way is far from easy.
72
+
73
+ # B. Related Work
74
+
75
+ Several works consider FL as a key enabler for vehicular networks in general, and AD in particular [12], such as secure data sharing [3], Autonomous Controllers [13], caching [14], and travel mode identification from non-i.i.d GPS trajectories [15]. Nonetheless, deploying FL on the road remains a challenging task due to uncertainties related to mobility and communication overhead. To overcome the communication bottleneck, works [16, 17, 18] have proposed judicious node selection and resource allocation for efficient training. However, these schemes are specifically designed for the topology and dynamics of standard wireless/cellular networks with high node density but relatively low mobility. In contrast, vehicular networks have rather low node density and very high node mobility [19]. As a result, new schemes are required for FL on the road. Meanwhile, V2V communication offers a new possibility for FL deployment that bypasses the bottleneck of communication with the MEC server[20, 21]. In vehicular networks, some vehicles serve as edge nodes to which neighboring nodes offload computation and data analysis tasks [22]. Edge vehicles are also used to provide a gateway functionality by ensuring continuous availability of diversified services such as multimedia content sharing [23]. A common practice among such works is creating clusters of vehicles where the edge vehicle acts as a cluster head. The clusters are formed based on several metrics such as the distance between the vehicles, their velocity and direction. Yet, these clustering schemes cannot be directly exploited in the context of FL. Recent VANET clustering works principally design algorithms based on their primary application [24, 25, 26, 27]. This is a logical approach since the design of a clustering algorithm highly influences the performance of the application for which it is used. A popular approach for cluster head selection widely used in the literature [25, 28, 29] requires each vehicle to calculate an index quantifying its fitness to act as a cluster head for its neighbours. Vehicles wishing to affiliate with a cluster head rank all neighbours in their neighbour table and request association with the most highly-ranked candidate node. The index is calculated as a weighted sum of several metrics, such as the degree of connectivity and link stability, with weights chosen depending on the importance of the considered metrics. However, due to the nature of FL applications, metrics related to learning/data should also be considered.
76
+
77
+ Furthermore, clustering is already used in FL as a means to accelerate the training by grouping nodes with similar
78
+
79
+ optimization goals, which train different versions of the model instead of one global model [9, 30, 10, 31]. In fact, one of the fundamental challenges in FL is the presence of non-i.i.d and unbalanced data distributions [32, 33]. These challenges go against the premise of FL, which aims to train one global model. Such settings require new mechanisms to be put in place in order to ensure the models' convergence. Clustered FL has attracted several research efforts, as it has generalization [34] and convergence [31] guarantees under non-i.i.d settings. By creating different models to adapt to different end users' distributions, clustered FL allows better model performance in the case of concept-shift. Concept-shift [10] occurs when the same inputs do not have the same label across users, as preferences vary. Moreover, in clustered FL, training becomes resilient to poisoning attacks [35] such as label flipping [36] (i.e., nodes misclassify some inputs under erroneous labels).
80
+
81
+ For instance, the authors in [9] develop a clustered FL procedure. Their work finds an optimal bipartitioning of the users based on cosine similarity for the purpose of producing personalized models for each cluster. The bipartitioning is repeated whenever FL has converged to a stationary point. In [10], a single clustering step, in a predetermined communication round, is introduced. In this step, all the users are required to participate, and the similarity of the updates is used to form clusters using hierarchical clustering. Nonetheless, the proposed approach requires knowing a distance threshold on the similarity values between the updates to form the clusters. Furthermore, cluster-based approaches assume that all the users participate, which is infeasible in dynamic and uncertain vehicular networks.
82
+
83
+ To the best of our knowledge, our work is the first to address the problem of clustered FL in hierarchical mobile architectures while considering the users' data distributions, wireless communication characteristics, and resource allocation constraints. Specifically, unlike other studies, we consider the learning aspect (i.e., node dataset characteristics and model dissimilarities) in addition to communication constraints (i.e., wireless channel quality, mobility, and communication latency). Hence, we propose a practical way to deploy FL in vehicular environments.
84
+
85
+ # III. SYSTEM MODEL
86
+
87
+ We consider a vehicular network composed of a set $V$ of $K$ vehicles and a set $U$ of $N$ gNodeBs. Both the communication among vehicles and the communication with the gNodeBs take place over wireless links. Additionally, the gNodeBs are connected to the Internet via a reliable backhaul link. The vehicles have enough computing and storage resources for the training, and the gNodeBs are equipped with MEC servers. The MEC servers are used to schedule the nearby vehicles, aggregate the updates, and manage the clusters. In the following, we explain the proposed cluster-based training process and the different components of the considered system model (i.e., communication and computation) in a vehicular environment.
88
+
89
+ # A. Process Overview
90
+
91
+ ![](images/50e440ba195cd2b24ff7c46d9135b2903fe972f0d825a0c4841aa7cd6e567988.jpg)
92
+ Fig. 1: Illustration of the different steps in clustered vehicular federated learning
93
+
94
+ FL in vehicular networks is subject to several challenges related to data, mobility, and communication and computation resources. In this paper, we consider these aspects in the design and optimization of the FL process in vehicular networks.
95
+
96
+ The first set of challenges is related to data: the learning process should be adapted to take data heterogeneity into account in order to accelerate model convergence. Data generated across different applications in vehicular networks depend on the specific vehicle sensors and on these sensors' data acquisition activities, which often leads to heterogeneous data distributions among FL participants (i.e., different dataset sizes and different data distributions). Furthermore, the dependence on data acquisition activities from vehicles with similar sensing capabilities makes the collected data highly redundant. As a result, local datasets cannot be regarded as equal in terms of information richness, as some participants may have larger and more diverse datasets than others. Furthermore, communication resources in this context are limited. In fact, in addition to the scarcity of bandwidth, the possible time for communication with the MEC server is limited by the time during which a vehicle is in the area covered by the base station. For all these reasons, the participant selection and bandwidth allocation mechanisms should be carefully designed for FL in vehicular networks. Hence, in this article, we use the data properties to guide the participants' selection in the training and communication process.
97
+
98
+ Furthermore, the model convergence speed is highly dependent on the number of collected updates. Vehicle-to-vehicle (V2V) communication offers a great alternative to bypass the communication bottleneck in vehicular networks by allowing
99
+
100
+ select vehicles to act as mediators between the other vehicles and the MEC server. We propose to use V2V communication in order to maximize the number of collected updates under communication uncertainty.
101
+
102
+ From these perspectives, we propose to prioritize the vehicles with the most informative datasets and use them as cluster-heads, while the remaining vehicles are associated with them. In this setting, each cluster-head aggregates the models of the vehicles in its cluster and uploads the resulting model: instead of sending all the collected updates, the cluster-head sends one aggregated model, which is more communication-efficient. In this case, hierarchical FL is used as a means to optimize communication in vehicular networks, where the MEC server performs a second round of aggregation.
103
+
104
+ Another aspect that needs to be considered is mobility and how it affects the communication among vehicles and with the MEC server. In order for the cluster-heads to successfully upload their models to the MEC server, the upload should be completed before the vehicles leave the coverage area of the BS. Furthermore, for the vehicles to be able to send their models to the cluster-head, their link lifetime (LLT) should be longer than the required time for training and uploading the models.
105
+
106
+ Adapting this approach to the case where multiple models need to be trained requires additional considerations. In fact, in the case where data distributions are subject to concept-shift, a single model is not enough. Concept-shift is another kind of data heterogeneity that arises in cases where data is subjective and
107
+
108
+ depends on the preferences of end users, or in the presence of adversaries. In classification problems, for instance, concept shift is when similar inputs have different labels depending on the end user. In the case of vehicles, the latter might simply not share the same model if they are not from the same OEM. The presence of different perspectives among vehicles makes it hard for one model to fit all. In our paper, we use hierarchical clustering by evaluating the model updates and their cosine similarity. The clustering can be executed in a predetermined communication round or when the model's convergence slows down. The newly created models are then used to associate each vehicle with the most adequate cluster-head. The same model can be trained among several clusters, as such redundancy is worthwhile for system robustness in the case of user dropout, and it also helps the model's convergence by collecting more updates.
109
+
110
+ All in all, to address the challenges linked to mobility, we design a mobility-aware scheme for clustered FL that takes data and model heterogeneity into account. The data heterogeneity is mainly considered in the selection of cluster-heads, while the model heterogeneity is used to create new models and to match vehicles to cluster-heads. In the following subsections, we start by detailing the overall learning model, then we present the mathematical formulation of its different aspects. We detail the steps of the clustered vehicular FL training procedure, then we give the formulations of the different metrics used in the procedure.
111
+
112
+ # B. Learning Model
113
+
114
+ A summary of the process is given in Algorithm 1, and more details of the scheme are given as follows:
115
+
116
+ - Step 1 (Publish FL model and requirements, and receive feedback): A global model is published by the MEC server, alongside its data and computation resource requirements (e.g., data types, data sizes, and CPU cycles). Each vehicle $k$ satisfying the requirements sends positive feedback, in addition to other information such as its data diversity index $I_{k}$ (see Eq. 2) and current velocity $v_{k}$ .
117
+ - Step 2 (Select and schedule cluster-heads $H$ ): The MEC server chooses the cluster-heads according to the received information. The selection is based on the dataset characteristics (i.e., quality of the dataset and the quantity of the samples), defined in subsection III-B1, in addition to the state of the wireless channel and the projected duration of the communication reflected by the rate of stay (See Eq. 5). In fact, the quality of local dataset directly determines the quality and the importance of model updates, while the velocity and the state of the wireless channels determine whether the model update can be received during the communication round. The details about the data evaluation are given in subsection III-B1, and the algorithm (Algorithm 2) is explained in Section IV-A.
118
+ - Step 3 (Clusters formation): After cluster-head selection, the set of the remaining vehicles $NH$ are matched to cluster-heads (set $H$ ). The matching requires that the sum of training and upload time of vehicle $k$ is less than
119
+
120
+ the Link Lifetime (LLT) (defined in Eq. 8) between $k$ and $h \in H$ if they are to be matched. Furthermore, the matching aims to maximize the weighted sum of $R_{k,h}$, where $R_{k,h}$ denotes the relationship between $k$ and $h$; its definition changes depending on whether there is only one global model or several versions (see Eq. 4). In the simple case of a single joint model, the clustering depends only on mobility, and accordingly $R_{k,h} = 1$ for all pairs $k \in NH, h \in H$. Otherwise, each vehicle should train its preferred model. The preference is defined as the accuracy of the model trained by $h$ on the local data of $k$. This definition is used because not all vehicles can participate in the updates clustering step (see Step 5).
121
+
122
+ - Step 4 (Model broadcast and training): The model is broadcast to the participants, where each vehicle trains on its local data for $\epsilon$ local epochs before sending its update to the corresponding cluster-head. Each cluster-head then aggregates the received models and sends the result to the MEC server, which in turn aggregates the global updates of the clusters. Such hierarchical FL aggregation is widely adopted in the FL literature [37, 38] and allows for more participation. Aggregating the updates at the MEC server level is required because each model version can be trained within several clusters, resulting in several global models. Such redundancy is necessary in vehicular networks, as it provides more robustness to client drop-out.
123
+ - Step 5 (Updates Clustering and Preference Evaluation): If the global model does not converge after several communication rounds, or the target accuracy is not attained, we perform one or several communication rounds involving a large fraction of the vehicles on the global joint model. This step requires collecting the updates at the MEC server without prior aggregation by cluster-heads, as the aggregated models would mask the divergence of the different models. The updates are used to judge the similarity (defined in Eq. 3) between participants using the hierarchical clustering algorithm, which iteratively merges the most similar clusters of participants up to a maximum number of clusters defined by the OEM. Fixing the maximum number of clusters allows creating clusters without prior knowledge of the possible distances between updates, while controlling the number of models in circulation. Once the clusters are created, new models are generated through aggregation. The models are broadcast to the available vehicles. Each vehicle evaluates the models on its local data and sends the results back to the MEC server. These values are later used to evaluate $R_{k,h}$ for each vehicle $k$. The resulting models are then trained independently but simultaneously using the same process. This preference evaluation distinguishes our work from previous work in clustered FL, which requires the participation of all the nodes, whereas our scheme tolerates partial participation.
124
+
125
+ The iterations and the steps' order are illustrated in Fig.1.
126
+
127
+ Algorithm 1 Clustered Vehicular Training procedure
128
+ 1: for $i\in [1\dots i_{max}]$ do
129
+ 2: if $i = 1$ then
130
+ 3: Step 1:
131
+ 4: initialize or download the newest model's parameters at the MEC server
132
+ 5: initialize the number of models with 1
133
+ 6: Publish model and training requirements
134
+ 7: end if
135
+ 8: Step 2: Receive vehicles information (transmit power, available data size, dataset diversity, CSI, velocity, preferred model)
136
+ 9: Schedule cluster-heads $H$ using Algorithm 2
137
+ 10: Step 3: Assign the remainder of vehicles (i.e., $NH$ ) to clusters using Algorithm 2
138
+ 11: Step 4:
139
+ 12: for vehicle $k\in NH$ do
140
+ 13: $k$ receives model $\theta_t$
141
+ 14: $k$ trains on local data $D_k$ for $\epsilon$ epochs
142
+ 15: $k$ sends updated model $\theta_k^{t+1}$ to its cluster-head
143
+ 16: end for
144
+ 17: for cluster head $h\in H$ do
145
+ 18: $h$ trains on local data $D_h$ for $\epsilon$ epochs
146
+ 19: $h$ receives model updates from vehicles in its cluster
147
+ 20: $h$ aggregates the model and sends new global model to MEC server
148
+ 21: end for
149
+ 22: Step 5:
150
+ 23: if $i = t_c$ then
151
+ 24: MEC server evaluates the similarities of the received models
152
+ 25: MEC server creates clusters based on the similarities and computes new global models using weighted average
153
+ 26: nodes receive new global models and evaluate their preferences
154
+ 27: end if
155
+ 28: aggregate updates
156
+ 29: start next round $i\gets i + 1$
157
+ 30: end for
158
+
159
+ Next, we present the formulations of the different elements in the system model, starting with the learning aspects (i.e., dataset characteristics and models similarity), to the different mobility and communication aspects considered throughout the proposed approach.
160
+
161
+ 1) Dataset characteristics: Considering the fact that datasets are non-i.i.d and unbalanced, a judicious cluster-head selection (Step 2) is necessary. In fact, each dataset can be characterized by how diverse its elements are, its size, and how many times the model was trained on it (i.e., the age of the update). In this paper, we focus on the non-i.i.d and unbalanced aspects; however, other metrics can be considered depending on the learned task, including the quality of the datasets and their reliability. We set the value of each metric as [39]: $\varphi_j\gamma_j$, where $\gamma_{j}$ is the adjustable weight of each metric, assigned
162
+
163
+ by the server, and $\varphi_{j}$ is the normalized value of metric $j$. Using the aforementioned characteristics, the diversity index of the dataset at node $k$ can be defined as:
164
+
165
+ $$
166
+ I_{k} = \sum_{j} \varphi_{j,k} \gamma_{j}, \tag{2}
167
+ $$
168
+
169
+ with $j \in \{ \text{elements diversity, dataset size, age} \}$ . The metric can be easily adjusted to include other task-specific considerations.
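+ As a minimal sketch of Eq. 2, the metric names and the weight values below are assumptions chosen for illustration only:
+
+ ```python
+ def diversity_index(metrics, weights):
+     """Eq. 2: weighted sum of normalized metric values.
+
+     metrics: dict of normalized metric values (phi_j, in [0, 1]);
+     weights: dict of server-assigned weights (gamma_j).
+     """
+     return sum(weights[j] * metrics[j] for j in weights)
+
+ # Example: a vehicle with diverse elements, a mid-size dataset, and a
+ # relatively fresh update (all values are hypothetical).
+ I_k = diversity_index(
+     {"elements_diversity": 0.8, "dataset_size": 0.5, "age": 0.9},
+     {"elements_diversity": 0.5, "dataset_size": 0.3, "age": 0.2},
+ )
+ ```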
170
+
171
+ 2) Updates similarity: In order to handle the non-i.i.d aspect, the updates' similarity is evaluated using cosine similarity [9, 10] in Step 5 of the algorithm, and new models are created by aggregating the most similar models. Given two model updates $\Delta \theta_{k}$ and $\Delta \theta_{l}$ , the similarity is calculated according to:
172
+
173
+ $$
174
+ \operatorname{sim}(k,l) = \frac{\left\langle \Delta\theta_{k}, \Delta\theta_{l} \right\rangle}{\left\| \Delta\theta_{k} \right\| \left\| \Delta\theta_{l} \right\|} \tag{3}
175
+ $$
176
+
177
+ where $\langle .,.\rangle$ is the dot product of two vectors. The dot product is divided by the product of the two vectors' lengths (or magnitudes). The values of $sim(.)$ lie between -1 and 1, and the dissimilarity (i.e., the cosine distance) $1 - sim(.)$ is used to cluster the updates. The cosine distance metric is invariant to scaling effects and therefore indicates how closely two vectors (in our case, updates) point in the same direction. The models' similarity is then used to create clusters using the hierarchical clustering algorithm [10], and the most similar models are aggregated to create new models.
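+ The clustering in Step 5 can be sketched with SciPy as follows; flattening each update into a single vector and the average-linkage choice are assumptions of this example, not prescriptions of the paper.
+
+ ```python
+ import numpy as np
+ from scipy.cluster.hierarchy import fcluster, linkage
+ from scipy.spatial.distance import pdist
+
+ def cluster_updates(updates, max_clusters):
+     """updates: list of 1-D numpy vectors (flattened model updates);
+     max_clusters: the cap on the number of clusters set by the OEM."""
+     X = np.stack(updates)                    # one row per participant
+     dists = pdist(X, metric="cosine")        # 1 - sim(k, l), as in Eq. 3
+     tree = linkage(dists, method="average")  # iterative agglomerative merging
+     return fcluster(tree, t=max_clusters, criterion="maxclust")
+ ```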
178
+
179
+ 3) Vehicle Relationships: During the cluster formation in Step 3, each cluster is created based on the relationship between the vehicles. The definition of this relationship depends on whether only one global model is trained or several versions of the model are created. In the case of multiple models, we define the preference for a model through its accuracy on the $k$th vehicle's dataset. We define the relationship $R_{k,h}$ between two vehicles as follows:
180
+
181
+ $$
182
+ R_{k,h} = \left\{ \begin{array}{ll} \text{accuracy of } h & \text{if more than 1 model} \\ 1 & \text{otherwise} \end{array} \right. \tag{4}
183
+ $$
184
+
185
+ # C. Communication Model
186
+
187
+ In Step 2, due to mobility and communication constraints, the RB allocation is jointly executed with the cluster-head selection. In fact, the mobility imposes a deadline for the upload based on the standing time of the vehicle. Additionally, in Step 3, the cluster formation must also consider the relationship between the vehicles in terms of mobility, which is modelled through the link lifetime (LLT). The different aspects of the communication model are formulated as follows:
188
+
189
+ 1) Standing time: While typically in FL the duration of a communication round is fixed by the centralized entity (e.g., the MEC server), the latency of FL in vehicular networks is dictated by the standing time of the participating nodes. Let the diameter of the coverage area of a gNodeB be denoted by $D$. For each vehicle $k$, the standing time in the coverage area of the current gNodeB is defined by Eq. 5 [14]:
190
+
191
+ $$
192
+ T _ {k} = \frac {D - x _ {k}}{v _ {k}} \tag {5}
193
+ $$
194
+
195
+ To ensure the communication with the gNodeB, the standing time of a vehicle $k$ selected as cluster-head should satisfy $(t_k^{train} + t_k^{up} + T_{agg} + \delta) \leq T_k$, where $t_k^{train}$ and $t_k^{up}$ are the estimated training time and upload time of vehicle $k$, respectively, $T_{agg}$ is the time required for aggregation, and $\delta$ is a waiting time for the updates' collection. Note that $t_k^{train}$ and $t_k^{up}$ vary the most among vehicles, as $t_k^{train}$ depends on the size of the dataset and $t_k^{up}$ depends on the channel gain and the resource block allocation.
196
+
197
+ 2) Resource Blocks: For each vehicle $k$ , we can infer the maximum $t_k^{up}$ by setting $(t_k^{train} + t_k^{up} + T_{agg} + \delta) = T_k$ . As a result, we can determine the minimum required data rate $r_{k,min}$ to send an update of size $s$ within a transmission time of $t_k^{up}$ as follows:
198
+
199
+ $$
200
+ t _ {k} ^ {u p} = \frac {s}{r _ {k , m i n}}. \tag {6}
201
+ $$
202
+
203
+ The achievable data rate of a node $k$ over the RB $q$ is defined as follows:
204
+
205
+ $$
206
+ r _ {k} ^ {q} = B \log_ {2} \left(1 + \frac {P _ {k} G _ {k , q}}{N _ {0}}\right) \tag {7}
207
+ $$
208
+
209
+ where $B$ is the bandwidth of an RB, $P_{k}$ is the transmit power of node $k$, and $N_{0}$ is the power spectral density of the Gaussian noise. The data rate of a vehicle is the sum of the data rates over all the RBs assigned to it.
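+ A small sketch tying Eqs. 5 to 7 together is given below; all inputs (deadline components, bandwidth, power, and channel gains) are illustrative values, not system parameters from the paper.
+
+ ```python
+ import math
+
+ def min_required_rate(s_bits, T_k, t_train, T_agg, delta):
+     """Eq. 6: the rate needed to upload s bits within the remaining time."""
+     t_up_max = T_k - (t_train + T_agg + delta)  # time left before leaving coverage
+     return s_bits / t_up_max
+
+ def achievable_rate(B, P_k, gains, N0):
+     """Sum of the per-RB rates of Eq. 7 over the RBs assigned to vehicle k."""
+     return sum(B * math.log2(1.0 + P_k * g / N0) for g in gains)
+ ```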
210
+
211
+ 3) Link Lifetime: In Step 3, in order to associate a vehicle $k \in NH$ with a cluster-head $h \in H$, it is necessary to evaluate the sustainability of the communication link, so as to ensure that the update of node $k$ will be successfully sent to $h$. The Link Lifetime (LLT) [40] defines link sustainability as the duration of time during which two vehicles remain connected. The LLT is defined in [40, 41, 42] by Eq. 8, for two vehicles $k$ and $h$ moving in the same or opposite directions. Assuming the trajectories of all vehicular nodes to be straight lines, and since the lane width is small, the y-coordinate can be ignored. We denote the positions of $k$ and $h$ by $x_{k}$ and $x_{h}$, respectively.
212
+
213
+ $$
214
+ LLT_{k,h} = \frac{-\Delta v_{kh} \times D_{kh} + |\Delta v_{kh}| \times TR}{(\Delta v_{kh})^{2}} \tag{8}
215
+ $$
216
+
217
+ with $\Delta v_{kh} = v_k - v_h$, $D_{kh} = x_k - x_h$, and $TR$ denoting the transmission range. Accordingly, the training time of $k$ plus the upload time from $k$ to $h$ must be less than or equal to $LLT_{k,h}$ (i.e., $(t_k^{train} + t_k^{up,h}) \leq LLT_{k,h}$).
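+ Eq. 8 translates directly into code; the handling of the zero relative-velocity case below is our own convention for the degenerate case where two vehicles keep the same speed.
+
+ ```python
+ def llt(x_k, v_k, x_h, v_h, TR):
+     """Eq. 8: remaining lifetime of the V2V link between vehicles k and h."""
+     dv = v_k - v_h            # relative velocity
+     D = x_k - x_h             # relative position along the road
+     if dv == 0:               # same speed: the link never expires (idealized)
+         return float("inf")
+     return (-dv * D + abs(dv) * TR) / (dv ** 2)
+ ```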
218
+
219
+ # IV. PROBLEM FORMULATION & PROPOSED SOLUTION
220
+
221
+ # A. Problem Formulation
222
+
223
+ Considering the collaborative aspect of FL and the communication bottleneck, we define the following goals for the cluster-head selection and cluster association:
224
+
225
+ - From the perspective of accelerating learning and maximizing representation, the scheduled cluster-heads must have large and diverse datasets. As a result, the goal of cluster-head selection is:
226
+
227
+ $$
228
+ \max _ {h, \alpha} \sum_ {k = 1} ^ {K} h _ {k} I _ {k}. \tag {9}
229
+ $$
230
+
231
+ - In order to guarantee that each vehicle trains its preferred model, the cluster assignment can be defined as a matching problem where we aim to maximize the relationship $R_{k,h}$ .
232
+
233
+ $$
234
+ \max _ {m} \sum_ {h \in H} \sum_ {v \in N H} R _ {v, h} m _ {v, h} \tag {10}
235
+ $$
236
+
237
+ Several constraints related to communication are imposed by the vehicular environment. Consequently, the first problem considered is a joint cluster-head selection and RB allocation. For each vehicle $k$ and RB $q$ we define $\alpha_{k,q}$ as:
238
+
239
+ $$
240
+ \alpha_{k,q} = \left\{ \begin{array}{ll} 1 & \text{if RB } q \text{ is assigned to } k \\ 0 & \text{otherwise} \end{array} \right. \tag{11}
241
+ $$
242
+
243
+ The cluster-head selection and RB allocation problem is formulated as follows:
244
+
245
+ $$
246
+ \underset{h,\alpha}{\text{maximize}} \quad \sum_{k=1}^{K} h_{k} I_{k} \tag{12a}
247
+ $$
248
+
249
+ subject to
250
+
251
+ $$
252
+ \left(t_{k}^{train} + \delta + t_{k}^{up} + T_{agg}\right) h_{k} \leq T_{k}, \quad \forall k \in [1, K], \tag{12b}
253
+ $$
254
+
255
+ $$
256
+ \sum_{k=1}^{K} \alpha_{k} \leq Total_{RB}, \tag{12c}
257
+ $$
258
+
259
+ $$
260
+ h _ {k} \in \{0, 1 \}, \quad \forall k \in [ 1, K ]. \tag {12d}
261
+ $$
262
+
263
+ Taking into account the results of the previous problem, we define $H = \{k : h_k = 1\}$ (i.e., the cluster-heads) and $NH = \{k : h_k = 0\}$ (i.e., the remainder of the vehicles). The next step is matching the set of vehicles $NH$ to the selected cluster-heads $H$. We consider that a maximum capacity $N_{max}$ is fixed for each cluster in order to reasonably allocate the V2V communication resources. Additionally, if a vehicle $\nu$ is to be matched with a cluster-head, it needs to respect the time constraints: it should be able to finish training and uploading before a deadline $T_h = t_h^{train} + \delta$, and $LLT_{\nu,h}$ should at least outlast the training and upload. We define $m_{\nu, h}$ as a binary variable equal to 1 if $\nu$ is matched with $h$ and 0 otherwise. Accordingly, we define the second problem as follows:
264
+
265
+ $$
266
+ \underset{m}{\text{maximize}} \quad \sum_{h \in H} \sum_{v \in NH} R_{v,h} m_{v,h} \tag{13a}
267
+ $$
268
+
269
+ subject to
270
+
271
+ $$
272
+ \sum_{h \in H} m_{v,h} \leq 1, \quad \forall v \in NH, \tag{13b}
273
+ $$
274
+
275
+ $$
276
+ \sum_{v \in NH} m_{v,h} \leq N_{\max}, \quad \forall h \in H, \tag{13c}
277
+ $$
278
+
279
+ $$
280
+ \left(t_{v}^{train} + t_{v}^{up}\right) m_{v,h} \leq LLT_{v,h}, \quad \forall v \in NH, h \in H, \tag{13d}
281
+ $$
282
+
283
+ $$
284
+ \left(t_{v}^{train} + t_{v}^{up}\right) m_{v,h} \leq T_{h}, \quad \forall v \in NH, h \in H, \tag{13e}
285
+ $$
286
+
287
+ $$
288
+ m _ {v, h} \in \{0, 1 \}, \quad \forall v \in N H. \tag {13f}
289
+ $$
290
+
291
+ # B. Proposed Algorithm
292
+
293
+ In this section, we present our proposed solution for cluster-head selection and RB allocation alongside the matching algorithm to solve (12) and (13). The challenging aspect of the
294
+
295
+ problem (12) is that it requires maximizing the weighted sum over the selected vehicles while jointly allocating the bandwidth. A restricted version of problem (12) can be shown to be equivalent to a knapsack problem, and thus it is NP-hard [43]. In fact, the problem aims to select vehicles that maximize the weighted sum $\sum_{k} I_{k} h_{k}$ subject to a knapsack capacity given by $\sum_{k} \alpha_{k} \leq Total_{RB}$ in constraint (12c), which can be transformed to $\sum_{k} \alpha_{k} h_{k} \leq Total_{RB}$, where $\alpha_{k}$ represents the weight of item $k$ (fixed for this restricted version) and $Total_{RB}$ represents the knapsack capacity. Thus, the problem is equivalent to a knapsack problem, and since the latter is NP-hard, so is problem (12). Constraint (12b) can be verified for each vehicle to filter out the ones that cannot upload their updates in time.
296
+
297
+ We chose to follow a greedy knapsack algorithm to solve the problem. In fact, we chose the greedy approach because it allows us to select the best candidates at an optimal RB cost, unlike the ranked-list solution, which would have optimized the sum of $I_{k}$ only [44]. Furthermore, the greedy knapsack algorithm has low complexity and allows fast and efficient scheduling in the rapidly changing vehicular environment. We calculate the minimum number of RBs required for each vehicle $k$ to be able to send its update by the deadline $T_{k}$, which we consider the cost of scheduling it, $c_{k} = \sum_{q\in RBs}\alpha_{k,q}$. The main time-consuming step is sorting all vehicles in decreasing order of their diversity-value-to-RB-cost ratio. After the vehicles are arranged in an ordered list, the following loop takes $O(n)$ time. Taking into account that the worst-case time complexity of sorting is $O(n\log n)$, the total time complexity of the proposed greedy algorithm is $O(n\log n)$.
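+ A compact sketch of this greedy selection follows; the input containers are illustrative, and vehicles violating constraint (12b) are assumed to be filtered out beforehand.
+
+ ```python
+ def select_cluster_heads(I, c, total_rb):
+     """I[k]: diversity index of vehicle k; c[k]: minimum RBs vehicle k needs
+     to meet its deadline T_k; total_rb: knapsack capacity (Eq. 12c)."""
+     order = sorted(range(len(I)), key=lambda k: I[k] / c[k], reverse=True)
+     heads, used = [], 0
+     for k in order:
+         if used + c[k] <= total_rb:  # admit k only if enough RBs remain
+             heads.append(k)
+             used += c[k]
+     return heads
+ ```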
298
+
299
+ The second formulated problem (13) is a maximum weighted bipartite matching problem [45, 46], where each $h \in H$ has a maximum capacity $N_{max}$ and each $\nu \in NH$ has a capacity of 1.
300
+
301
+ In order to include the remainder of the constraints, we define $\zeta_{\nu,h}$ as a binary value, where $\zeta_{\nu,h} = 0$ if constraint (13d) cannot be satisfied when $m_{\nu,h} = 1$, and $\zeta_{\nu,h} = 1$ otherwise. The goal is redefined so as to maximize the weighted sum of $R_{\nu,h} \times \zeta_{\nu,h}$. The problem becomes an integer linear program (ILP) and is solved using an off-the-shelf ILP solver (e.g., Python's PuLP [47]).
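+ Since PuLP is the solver the paper points to, the matching ILP can be sketched as follows; the function signature and data structures are illustrative.
+
+ ```python
+ import pulp
+
+ def match(NH, H, R, zeta, N_max):
+     """Problem (13): R[v, h] holds the relationship, zeta[v, h] is 0 for
+     pairs violating the time constraints and 1 otherwise."""
+     prob = pulp.LpProblem("cluster_matching", pulp.LpMaximize)
+     m = pulp.LpVariable.dicts("m", [(v, h) for v in NH for h in H], cat="Binary")
+     # Objective (13a), with zeta zeroing out infeasible pairs.
+     prob += pulp.lpSum(R[v, h] * zeta[v, h] * m[v, h] for v in NH for h in H)
+     for v in NH:  # (13b): each vehicle joins at most one cluster
+         prob += pulp.lpSum(m[v, h] for h in H) <= 1
+     for h in H:   # (13c): cluster capacity
+         prob += pulp.lpSum(m[v, h] for v in NH) <= N_max
+     prob.solve()
+     return [(v, h) for v in NH for h in H if m[v, h].value() == 1]
+ ```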
302
+
303
+ To illustrate the problem, we consider the example in Fig. 2. The vehicles and their relationships can be represented as a graph, where the vehicles are the vertices and their relationships are the edges, weighted with $R_{\nu,h} \times \zeta_{\nu,h}$. The goal is to find a subgraph whose selected edges have an optimal (in our case, maximum) weight sum. The remaining constraints are the maximum capacities of the vehicles (in red). The cluster-heads (in yellow, on the right) each have a maximum capacity $N_{max} = 3$ (Constraint 13c), and the other vehicles have a capacity of 1 (Constraint 13b). In the illustrated problem, the pairs $\nu_2, h_2$ and $\nu_3, h_1$ cannot be matched since the corresponding edges (in dashed lines) have null weights, due either to a possible disconnection or to poor model performance. The optimal matching is then chosen among the remaining pairs. The optimal solution for the illustrated problem (in yellow lines) has a sum of 3.0.
304
+
305
+ ![](images/52850b49dd0347f02fe8af879938617eaec6ca0490da9996dff135c191994347.jpg)
306
+ Fig. 2: Illustration of the Matching problem
307
+
308
+ We define Algorithm 2, Clustered Vehicular FL (CVFL), which iteratively selects the nodes with the best ratio $\frac{I_k}{c_k}$ as cluster-heads, and then matches the rest of the vehicles to them, after verifying the time constraints, by creating clusters that maximize $\sum_{h\in H}\sum_{\nu \in NH}R_{\nu ,h}\times \zeta_{\nu ,h}$.
309
+
310
+ # V. PERFORMANCE EVALUATION
311
+
312
+ # A. Simulation Environment and Parameters
313
+
314
+ The simulations were conducted on a desktop computer with a 2.6 GHz Intel i7 processor, 16 GB of memory, and an NVIDIA GeForce RTX 2070 Super graphics card. We used PyTorch [48] as the machine learning library. In the following numerical results, each presented value is the average of multiple independent runs.
315
+
316
+ Datasets: We used the benchmark image classification datasets MNIST [49], a dataset of handwritten digit images, and FashionMNIST [50], a dataset of grayscale fashion-product images, which we distribute randomly among the simulated devices. MNIST and FashionMNIST constitute simple yet flexible tasks to test various clustered settings and data partitions. Each dataset contains 60,000 training examples and 10,000 test examples. The data partition is designed specifically to illustrate various ways in which data distributions might differ between vehicles. The data partition we adopted is as follows: we first sort the data by digit label, then we form 1200 shards of 50 images each. Each shard is composed of images from one class, i.e., images of the same digit. At the beginning of every simulation run, we randomly allocate a minimum of 1 shard and a maximum of 30 shards to each of the $K$ vehicles. This method of allocation allows us to create an unbalanced and non-i.i.d distribution of the dataset, which is varied in each independent run.
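+ The shard-based partition can be sketched as follows; the helper name and the NumPy-based implementation are ours, not the exact code used for the experiments.
+
+ ```python
+ import numpy as np
+
+ def partition(labels, K, n_shards=1200, shard_size=50, rng=None):
+     """Sort by label, cut into shards of one class each, and hand every
+     vehicle between 1 and 30 random shards."""
+     rng = rng or np.random.default_rng()
+     idx = np.argsort(labels)  # group examples of the same digit together
+     shards = [idx[i * shard_size:(i + 1) * shard_size] for i in range(n_shards)]
+     rng.shuffle(shards)
+     out, cursor = [], 0
+     for _ in range(K):
+         take = int(rng.integers(1, 31))  # 1 to 30 shards per vehicle
+         out.append(np.concatenate(shards[cursor:cursor + take]))
+         cursor += take  # with K = 30, at most 900 of the 1200 shards are used
+     return out
+ ```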
317
+
318
+ Furthermore, in order to evaluate the updates' clustering and the adequacy of the preference evaluation, we partition the vehicles' indexes into $N_{shifts}$ groups. For each group, two digit labels are swapped. For instance, one group might relabel all digits labelled 1 as 7 and vice versa. The swapped tuples are $\{(1,7),(3,5)\}$ for MNIST and $\{(1,3),(6,0)\}$ for
319
+
320
+ Algorithm 2 Clustered Vehicular Federated Learning (CVFL)
321
+ Input: A queue of $K$ vehicles; total available resource blocks $Total_{RB}$; Output: $\alpha$, $h = [h_1,\dots ,h_K]$.
+ 1: // Cost Evaluation
+ 2: for $k = 1,\ldots ,K$ do
+ 3: $r_k = 0, c = 1$.
+ 4: order the RBs using $r_{k,q}$.
+ 5: while $r_k\leq r_{k,min}$ and $c\leq Total_{RB}$ do
+ 6: $q^{*}\gets \arg \max_{q\in Z}G_{k,q}$.
+ 7: $r_k\gets r_k + r_{k,q}$.
+ 8: $c\gets c + 1$.
+ 9: $c_{k}\leftarrow c$;
+ 10: end while
+ 11: end for
+ 12: return $C = [c_1,\dots ,c_K]$
+ 13: // RB Allocation
+ 14: order vehicles decreasingly according to their ratio $L = [\frac{I_k}{c_k}\ \forall k]$;
+ 15: for $k = 1\dots K$ do $h_k\gets 0$.
+ 16: end for
+ 17: $A\gets Z$.
+ 18: $k\gets \arg \max (L)$.
+ 19: while $A\neq \emptyset$ do
+ 20: order the RBs using $r_{k,q}$.
+ 21: while $r_k\leq r_{k,min}$ and $c\leq Total_{RB}$ do
+ 22: $q^{*}\gets \arg \max_{q\in A}G_{k,q}$.
+ 23: $r_k\gets r_k + r_{k,q}$.
+ 24: $\alpha_{k,q}\gets 1$.
+ 25: $A\gets A\setminus \{q\}$.
+ 26: end while
+ 27: $h_k\gets 1$.
+ 28: end while
+ 29: return $h$ and $\alpha$
+ 30: // Matching
+ 31: Use $h$ to form the $H$ and $NH$ sets;
+ 32: Infer the values of $R_{k,h}\ \forall k\in NH, h\in H$.
+ 33: Estimate $LLT_{k,h}\ \forall k\in NH, h\in H$.
+ 34: Verify the time constraints and calculate $\zeta$.
+ 35: Solve the matching problem using a maximum weight bipartite matching algorithm [45] with an off-the-shelf solver such as Python's PuLP [47].
+ 36: Uniformly allocate the RBs of the V2V links to the associated vehicles.
322
+
323
+ FashionMNIST [51]. Each group thus contains $\frac{K}{N_{shifts}}$ vehicles. This partition allows us to test the proposed algorithm's ability to train models in the presence of concept shift and unbalanced data. The test set is divided into $N_{shifts}$ datasets, and the average accuracy is then reported.
324
+
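+ A minimal sketch of this label swap (the helper name is ours; the tuples are the MNIST swaps listed above):
+
+ ```python
+ import numpy as np
+
+ def apply_concept_shift(labels, swaps=((1, 7), (3, 5))):
+     """Swap the two labels of each tuple for one group of vehicles,
+     e.g. relabel 1 as 7 and 7 as 1; use ((1, 3), (6, 0)) for FashionMNIST."""
+     shifted = labels.copy()
+     for a, b in swaps:
+         shifted[labels == a] = b
+         shifted[labels == b] = a
+     return shifted
+ ```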
325
+ # FL Parameters:
326
+
327
+ We consider $K = 30$ vehicles collaboratively training a multi-layer perceptron (MLP) model with two hidden layers (64 neurons each), and a convolutional neural network (CNN) model with two 5x5 convolution layers (the first with 10 channels, the second with 20, each followed by $2 \times 2$ max pooling), two fully connected layers with 50 units and ReLU activation, and a final softmax output layer. We use lightweight models as they can realistically be trained on end-devices in rapidly changing environments. Due to the mobility of the vehicles, and in order to collect a maximum number of updates, it is more practical to choose a small number of local epochs for each participant; as a result, in the following simulations, the number of local epochs is set to $\epsilon = 1$ . In the preliminary evaluations, the maximum number of communication rounds is $i_{max} = 30$ , and the clustering is set at round 25. $t_{k,train}$ for each vehicle is calculated locally using our configuration.
330
+
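+ Under the stated hyper-parameters, the two models could be sketched in PyTorch as follows; we read "two fully connected layers with 50 units" as one 50-unit hidden layer followed by the 10-way output, so the exact layer sizes are our assumption:
+
+ ```python
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class MLP(nn.Module):
+     """Multi-layer perceptron with two 64-neuron hidden layers."""
+     def __init__(self, num_classes=10):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Flatten(),
+             nn.Linear(28 * 28, 64), nn.ReLU(),
+             nn.Linear(64, 64), nn.ReLU(),
+             nn.Linear(64, num_classes))
+
+     def forward(self, x):
+         return self.net(x)
+
+ class CNN(nn.Module):
+     """Two 5x5 conv layers (10 then 20 channels), each followed by
+     2x2 max pooling, then the fully connected layers."""
+     def __init__(self, num_classes=10):
+         super().__init__()
+         self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
+         self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
+         self.fc1 = nn.Linear(320, 50)   # 20 channels * 4 * 4 after pooling
+         self.fc2 = nn.Linear(50, num_classes)
+
+     def forward(self, x):
+         x = F.max_pool2d(F.relu(self.conv1(x)), 2)
+         x = F.max_pool2d(F.relu(self.conv2(x)), 2)
+         x = x.flatten(1)
+         return self.fc2(F.relu(self.fc1(x)))  # softmax is folded into the loss
+ ```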
331
+ # B. Preliminary evaluations: Parking Lot Scenario
332
+
333
+ In this part of the evaluations, we focus on the learning aspect by studying the proposed algorithm in a less constrained environment.
334
+
335
+ 1) Simple unbalanced and non-i.i.d distribution: In this part of the simulation, we ignore the LLT constraint in problem (13), as the velocities are set to 0. The results in Fig. 3 show that a significant improvement is reached through the use of V2V communication. With more participation, we also noticed that the training tends to be more stable, with the loss function steadily declining in comparison to standard FL. Furthermore, higher accuracy scores are achieved by our proposed method. While the average local accuracy at the end of training the MLP on MNIST is $80\% \pm 10\%$ for vanilla FL, it reaches an average of $82\% \pm 9\%$ for our proposed approach. Similarly, on FashionMNIST, the results are $66.79\% \pm 10\%$ for vanilla FL and $68.74\% \pm 9\%$ for our approach. Owing to its high suitability for image processing tasks, the CNN model yielded higher results: vanilla FL reached $94.58\% \pm 7\%$ and our proposed method achieved $95.5\% \pm 5\%$ . These results can be considered baseline values under perfect conditions for the subsequent experiments, against which we can assess the robustness of CVFL under mobility and concept shift. Based on these preliminary results, we expect to see more differences and variance in the results for the MLP model compared to the CNN model. We can also expect better performance for the MLP model on the MNIST dataset compared to FashionMNIST.
336
+
337
+ 2) Unbalanced and non-i.i.d distribution with concept shift: The presence of concept shift requires the clustering phase in order to improve the final results. In these simulations, we fixed the maximum number of clusters to 2 and studied the effect of partial participation on the clustering. Given the presence of concept shift for 4 out of 10 digits, we expect the accuracy to be around $60\%$ .
338
+
339
+ To study the effect of the fraction of participants in the partial clustering phase, we performed multiple independent runs for each fraction in $\{20\%, 60\%, 100\%\}$ . The results are shown in Fig. 4. For both vanilla FL and the proposed partial clustering approach, the number of participants in each round is 6. For standard FL, the average accuracy is $65\%$ , while for $20\%$ the average is $68\%$ $(+3\%)$ and for $60\%$ the average is $69\%$ $(+5\%)$ . It should be noted that the dissimilarity of the updates is harder to detect here, as only 2 out of 10 digits are swapped for each group.
340
+
341
+ ![](images/9b25646911c91e277a329f8c4c7922e727b9c3a6121e5ba1408e306050a929ed.jpg)
342
+ Fig. 3: Preliminary results on non-i.i.d and unbalanced data without concept-shift
343
+
344
+ TABLE II: Generated Values
345
+
346
+ <table><tr><td>Vehicle antenna height</td><td>1.5 m</td></tr><tr><td>Vehicle antenna gain</td><td>3 dBi</td></tr><tr><td>Shadowing distribution</td><td>Log-normal</td></tr><tr><td>Shadowing standard deviation</td><td>3 dB</td></tr><tr><td>Noise power N0</td><td>-114 dBm</td></tr><tr><td>Fast fading</td><td>Rayleigh fading</td></tr><tr><td>Transmit power</td><td>0.1 W</td></tr><tr><td>Vehicle generation model</td><td>Spatial Poisson process</td></tr><tr><td>Velocity generation model</td><td>Truncated Gaussian</td></tr><tr><td>Model size</td><td>160 kbits</td></tr><tr><td>Bandwidth per RB</td><td>180 kHz</td></tr><tr><td>Nmax</td><td>2</td></tr><tr><td>Total RBs</td><td>4</td></tr><tr><td>δ</td><td>2 s</td></tr></table>
347
+
348
+ # C. Freeway scenario
349
+
350
+ We consider that the $K = 30$ vehicles are randomly distributed over 6 lanes within a radius $D = 2$ km. The vehicular communication and mobility model parameters are based on [52] and are summarized in Table II. The velocities of the vehicles are assumed to be i.i.d and are generated by a truncated Gaussian distribution. In contrast to the normal Gaussian distribution or constant values, the truncated Gaussian distribution is more realistic for modelling vehicle speeds, as it generates varying values within a limited range. This assumption is widely adopted in state-of-the-art works on vehicular networks [14]. The lower and upper bounds for the velocity values on the 3 lanes going in the same direction are (60, 80), (80, 100), and (100, 120) km/h.
351
+
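+ A minimal SciPy sketch of this per-lane velocity generation; the paper fixes only the bounds, so the mean and standard deviation below are illustrative assumptions:
+
+ ```python
+ from scipy.stats import truncnorm
+
+ def sample_lane_velocities(counts, bounds=((60, 80), (80, 100), (100, 120))):
+     """Draw i.i.d. truncated-Gaussian speeds (km/h) per lane, with the mean
+     at the lane centre and the std covering a quarter of the range (assumed)."""
+     speeds = []
+     for n, (lo, hi) in zip(counts, bounds):
+         mu, sigma = (lo + hi) / 2, (hi - lo) / 4
+         a, b = (lo - mu) / sigma, (hi - mu) / sigma  # standardized bounds
+         speeds.append(truncnorm.rvs(a, b, loc=mu, scale=sigma, size=n))
+     return speeds
+
+ # e.g. ten vehicles on each of the three same-direction lanes:
+ # v_lane1, v_lane2, v_lane3 = sample_lane_velocities((10, 10, 10))
+ ```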
352
+ 1) Key performance results: In this part of the evaluation, we vary the model size and the number of RBs in order to evaluate how the CVFL algorithm adapts to different training and upload requirements. We evaluated how the number of selected cluster-heads and the total number of participants change in each scenario. We also evaluated the average running time of the matching algorithm as the number of participants varies.
353
+
354
+ Table III shows the average number of cluster-heads selected in each communication round, and Table IV shows
355
+
356
+ the average number of participants in each communication round. It is clear from the results that the number of RBs is the defining factor for the number of cluster-heads and, consequently, the number of participants. The results also show that the proposed algorithm can safely scale up to handle large models, or more local epochs in the case of small models.
357
+
358
+ TABLE III: Average Number of cluster heads in each communication round
359
+
360
+ <table><tr><td></td><td colspan="3">Model Size in Kbits</td></tr><tr><td>Number of RBs</td><td>160</td><td>320</td><td>640</td></tr><tr><td>2</td><td>2.43 ± 0.26</td><td>2.39 ± 0.30</td><td>2.39 ± 0.24</td></tr><tr><td>3</td><td>4.13 ± 0.57</td><td>4.056 ± 0.51</td><td>4.216 ± 0.48</td></tr><tr><td>4</td><td>5.565 ± 0.69</td><td>5.504 ± 0.71</td><td>5.568 ± 0.54</td></tr></table>
361
+
362
+ TABLE IV: Average Number of participants in each run
363
+
364
+ <table><tr><td></td><td colspan="3">Model Size in Kbits</td></tr><tr><td>Number of RBs</td><td>160</td><td>320</td><td>640</td></tr><tr><td>2</td><td>7.28 ± 0.80</td><td>7.16 ± 0.91</td><td>7.168 ± 0.74</td></tr><tr><td>3</td><td>12.26 ± 12.05</td><td>1.36 ± 0.51</td><td>12.44 ± 1.39</td></tr><tr><td>4</td><td>16.00 ± 1.46</td><td>15.81 ± 1.47</td><td>16.04 ± 1.24</td></tr></table>
365
+
366
+ TABLE V: Average Running time of the matching algorithm
367
+
368
+ <table><tr><td>Number of vehicles</td><td>Average CPU time (s)</td></tr><tr><td>25</td><td>0.02</td></tr><tr><td>50</td><td>0.03</td></tr><tr><td>75</td><td>0.03</td></tr><tr><td>100</td><td>0.03</td></tr><tr><td>125</td><td>0.04</td></tr></table>
369
+
370
+ Deriving an analytical expression for the time complexity of the ILP-based matching algorithm is not straightforward, since the low-level implementation details of the solver are not available to us. However, we evaluated the running time in different settings, varying the number of nodes, to see how it scales to large numbers of participants. The average running time values in seconds on our machine are summarized in Table V. In general, the matching algorithm can easily handle large pools of participants without a high impact on the execution time.
371
+
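+ As an illustration, the matching step of Algorithm 2 can be written directly in PuLP; this is a minimal sketch, not the authors' implementation, where `R` and `zeta` are assumed to be nested dicts holding the preference values and time-feasibility flags, and `n_max` bounds the cluster size:
+
+ ```python
+ import pulp
+
+ def match_vehicles(R, zeta, heads, non_heads, n_max=2):
+     """Maximum-weight bipartite b-matching of non-head vehicles to
+     cluster heads, maximizing sum_h sum_v R[v][h] * zeta[v][h]."""
+     prob = pulp.LpProblem("cluster_matching", pulp.LpMaximize)
+     pairs = [(v, h) for v in non_heads for h in heads]
+     x = pulp.LpVariable.dicts("x", pairs, cat="Binary")
+     # Objective: total preference, masked by the time-constraint flag zeta.
+     prob += pulp.lpSum(R[v][h] * zeta[v][h] * x[(v, h)] for v, h in pairs)
+     for v in non_heads:  # each vehicle joins at most one cluster
+         prob += pulp.lpSum(x[(v, h)] for h in heads) <= 1
+     for h in heads:      # at most n_max members per cluster head
+         prob += pulp.lpSum(x[(v, h)] for v in non_heads) <= n_max
+     prob.solve(pulp.PULP_CBC_CMD(msg=False))
+     return {v: h for v, h in pairs if x[(v, h)].value() > 0.5}
+ ```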
372
+ 2) Effect on the accuracy: To study the proposed approach in a mobility scenario, we first considered a simple case of unbalanced and non-i.i.d distribution, then stress-tested CVFL under concept shift. The number of available RBs in each communication round is limited to 4, and the simulations were conducted for $i_{max} = 50$ communication rounds.
373
+
374
+ Fig. 5 shows the results for unbalanced and non-i.i.d distribution in the mobility scenario.
375
+
376
+ Owing to the larger number of participants (see Table IV), higher accuracy values are obtained across the experiments. CVFL achieves an accuracy of $87\% \pm 4\%$ , in contrast to $85\% \pm 5\%$ for standard FL under the same settings when training the MLP model on MNIST, and the CNN model achieves similar results for both CVFL ( $95\% \pm 5\%$ ) and vanilla FL ( $94\% \pm 7.5\%$ ). The average accuracy values on FashionMNIST are $69.66\% \pm 9\%$ for CVFL and $66.46\% \pm 10\%$ for vanilla FL. The larger standard deviations of the vanilla FL results across the experiments in this case are possibly due to the smaller number of participants in each round compared to CVFL, where almost half of the vehicles train their models, which provides more consistency throughout the experiments.
377
+
378
+ ![](images/f11608b3c6d5231eac26a51b3cb3e6f5343f79cb2baa5b423f53503a18574bec.jpg)
379
+ (a) Fraction $= 20\%$
380
+
381
+ ![](images/ef158f541232d2d6ae7efb852fd8cf03c40d62084eb71bb3b31d89d05c45e058.jpg)
382
+ (b) Fraction $= 60\%$
383
+
384
+ ![](images/39cdcceeb54346a247e0db93ef059fa9e713983a1e4aec1fa5795fb8577b579b.jpg)
385
+ (c) Fraction $= 100\%$
386
+ Fig. 4: The importance of the fraction of the participants in the clustering step under concept-shift
387
+
388
+ ![](images/128298b4bcbc5cedd69b3b7c0dcd0ec2bdc43ef81d620d33b6adcd0e603ea9dc.jpg)
389
+ (a) MNIST - MLP
390
+
391
+ ![](images/e4b60af9e4f11e2b50209f18f38baa2df8438abab064b2198426e4e3d3874734.jpg)
392
+ (b) MNIST - CNN
393
+
394
+ ![](images/a71772e238e5c52a8d5d52efaa88c6064355fa77e785195f40928bd7f2744596.jpg)
395
+ (c) FashionMNIST-MLP
396
+ Fig. 5: Evaluation of CVFL when the relationship is defined through mobility only
397
+
398
+ ![](images/2f7c8e05dfb628e17886457ffd9c5a6d0802d9c7d91033c8214824c438086a19.jpg)
399
+ (a) MNIST - MLP
400
+
401
+ ![](images/f2b6f67456dda2fcb4807ad6d858b115239eb0ac9dd281d70454c41942488516.jpg)
402
+ (b) MNIST - CNN
403
+ Fig. 6: Evaluation of CVFL under concept shift
404
+
405
+ ![](images/2caae6d3b4ca833db8904893cd4e31ab4870f46282ff6eb109ac5b428057bbd8.jpg)
406
+ (c) FashionMNIST-MLP
407
+
408
+ The second set of simulation runs is on an unbalanced and non-i.i.d distribution with concept shift. Fig. 6 shows how the models performed under these conditions in a freeway setting. Overall, accuracy values are significantly lower than those obtained on datasets without concept shift. More specifically, the average accuracy of the MLP model achieved in the 50th round on the MNIST dataset is $68\% \pm 9\%$ , in contrast to $65\% \pm 7\%$ for vanilla FL. The CNN model yielded nearly identical results for CVFL ( $80\% \pm 9\%$ ) and vanilla FL ( $80\% \pm 5\%$ ). The larger standard deviations in CVFL are due to the fact that the models resulting from clustering often perform differently on the test sets. The concept shift appears to affect the accuracy on FashionMNIST to a greater extent, as
409
+
410
+ the accuracy drops to around $55\%$ for both CVFL and vanilla FL. In contrast to the previous experiments, the difference is small in later rounds because only a small fraction of users participate in the clustering step. This can be overcome through the introduction of more communication rounds on the same version of the model in order to collect more updates. Additionally, the gaps in accuracy values are large in the earlier communication rounds, before the clustering round. A likely reason for the gap's narrowing is that new models are created and a smaller number of clients train the same model; as the number of clients training each model in a given round is a key factor in convergence speed, we suspect it is the cause.
411
+
412
+ # VI. LIMITATIONS AND FUTURE WORK
413
+
414
+ Through this work, we have identified several potential future research directions and open issues that are worth exploring.
415
+
416
+ - Large-scale collaboration: Extending the proposed model to take into account handover between base stations, among other factors, in order to enable continuous training throughout vehicles' trips and to reduce lost updates. Furthermore, fully decentralized training can be implemented for areas with low coverage, while also taking model convergence into consideration.
417
+ - Adversarial attacks and outliers: The updates' clustering is useful for detecting local models that diverge from the majority of the received updates. This step can be further exploited to eliminate outliers and adversaries. Additionally, due to the collaborative and hierarchical nature of the proposed approach, trust among vehicles and the reliability of their models can be further enhanced through traceability and incentive/punishment mechanisms [53].
418
+ - Experimental values: Setting thresholds concerning LLT and rate of stay based on experimental or real data traces. Other training-related values can also be adjusted dynamically, such as the number of local epochs and the batch size.
419
+ - Enhanced privacy: While FL provides some privacy for each user's raw data, the model updates can be reverse-engineered to reveal sensitive information about the users. Techniques such as differential privacy can be used to enhance privacy preservation in FL in vehicular environments.
420
+
421
+ # VII. CONCLUSION
422
+
423
+ In this paper, we have investigated the problem of clustered FL in vehicular networks. We aimed to fill the gap between clustering in vehicular networks and clustering in FL by designing a mobility-aware learning process for clustered FL. In the proposed architecture, we consider V2V communication an asset for overcoming the communication bottleneck of FL in vehicular networks. Accordingly, in each communication round, a subset of vehicles is selected to act as cluster-heads, and the remaining vehicles are matched to them. The selection favors vehicles with diverse datasets and good wireless communication channels with the gNodeB. Furthermore, clustering based on the similarity of the updates is introduced to mitigate the slow convergence of a single joint FL model in non-i.i.d settings, especially in the presence of concept shift. This step leads to the creation of new models, which are sent to the non-participants and newly joined vehicles, who evaluate them and score their preferences for these models. The resulting preference values are used to match each vehicle to its preferred model (cluster-head). Both the cluster-head selection and the cluster matching are formulated as optimization problems with learning goals and mobility constraints. We have proposed a greedy algorithm for the selection and RB allocation of cluster-heads, and a maximum-weight bipartite matching algorithm for the cluster formation. Simulations show the efficacy of using V2V communication to accelerate
424
+
425
+ the learning as well as the importance of clustering based on updates to control concept shift. In the future, we aim to make the proposed approach resilient to outliers and malicious attacks such as false data injection.
426
+
427
+ # ACKNOWLEDGMENTS
428
+
429
+ The authors would like to thank the Natural Sciences and Engineering Research Council of Canada, for the financial support of this research.
430
+
431
+ # REFERENCES
432
+
433
+ [1] E. Yurtsever, J. Lambert, A. Carballo, and K. Takeda, "A Survey of Autonomous Driving: Common Practices and Emerging Technologies," IEEE Access, vol. 8, pp. 58443-58469, 2020.
434
+ [2] S. Grigorescu, B. Trasnea, T. Cocias, and G. Macesanu, "A survey of deep learning techniques for autonomous driving," Journal of Field Robotics, vol. 37, no. 3, pp. 362-386, 2020. [Online]. Available: https://onlinelibrary.wiley.com/doi/abs/10.1002/rob.21918
435
+ [3] Y. Lu, X. Huang, K. Zhang, S. Maharjan, and Y. Zhang, "Blockchain Empowered Asynchronous Federated Learning for Secure Data Sharing in Internet of Vehicles," IEEE Transactions on Vehicular Technology, vol. 69, no. 4, pp. 4298-4311, Apr. 2020.
436
+ [4] L. Szabó, L. Lindenmaier, and V. Tihanyi, "Smartphone Based HD Map Building for Autonomous Vehicles," in 2019 IEEE 17th World Symposium on Applied Machine Intelligence and Informatics (SAMI), Jan. 2019, pp. 365-370.
437
+ [5] S.-W. Kim and W. Liu, "Cooperative Autonomous Driving: A Mirror Neuron Inspired Intention Awareness and Cooperative Perception Approach," IEEE Intelligent Transportation Systems Magazine, vol. 8, no. 3, pp. 23-32, 2016.
438
+ [6] A. Filali, A. Abouaomar, S. Cherkaoui, A. Kobbane, and M. Guizani, "Multi-Access Edge Computing: A Survey," IEEE Access, vol. 8, pp. 197017-197046, 2020.
439
+ [7] A. Abouaomar, S. Cherkaoui, Z. Mlika, and A. Kobbane, "Service function chaining in MEC: A mean-field game and reinforcement learning approach," 2021.
440
+ [8] J. Konečný, H. B. McMahan, D. Ramage, and P. Richtárik, "Federated Optimization: Distributed Machine Learning for On-Device Intelligence," arXiv:1610.02527 [cs], Oct. 2016. [Online]. Available: http://arxiv.org/abs/1610.02527
441
+ [9] F. Sattler, K.-R. Müller, and W. Samek, "Clustered Federated Learning: Model-Agnostic Distributed Multitask Optimization Under Privacy Constraints," IEEE Transactions on Neural Networks and Learning Systems, pp. 1-13, 2020.
442
+ [10] C. Briggs, Z. Fan, and P. Andras, "Federated learning with hierarchical clustering of local updates to improve training on non-IID data," Jul. 2020, pp. 1-9, ISSN: 2161-4407.
443
+ [11] M. Azizian et al., "Dcev: A distributed cluster formation for vanet based on end-to-end reactive mobility," in 2016 International Wireless Communications and Mobile Computing Conference (IWCMC), 2016, pp. 287-291.
444
+ [12] B. Yang, X. Cao, K. Xiong, C. Yuen, Y. L. Guan, S. Leng, L. Qian, and Z. Han, "Edge Intelligence for Autonomous Driving in 6G Wireless System: Design Challenges and Solutions," IEEE Wireless Communications, vol. 28, no. 2, pp. 40-47, Apr. 2021.
445
+ [13] T. Zeng, O. Semiari, M. Chen, W. Saad, and M. Bennis, "Federated Learning on the Road: Autonomous Controller Design for Connected and Autonomous Vehicles," arXiv:2102.03401 [cs, eess], Feb. 2021, arXiv: 2102.03401. [Online]. Available: http://arxiv.org/abs/2102.03401
446
+ [14] Z. Yu, J. Hu, G. Min, Z. Zhao, W. Miao, and M. S. Hossain, "Mobility-Aware Proactive Edge Caching for Connected Vehicles Using Federated Learning," IEEE Transactions on Intelligent Transportation Systems, pp. 1-11, 2020.
447
+ [15] Y. Zhu, S. Zhang, Y. Liu, D. Niyato, and J. J. Yu, "Robust federated learning approach for travel mode identification from non-iidgps trajectories," in 2020 IEEE 26th International Conference on Parallel and Distributed Systems (ICPADS), 2020, pp. 585-592.
448
+ [16] W. Y. B. Lim, N. C. Luong, D. T. Hoang, Y. Jiao, Y.-C. Liang, Q. Yang, D. Niyato, and C. Miao, "Federated Learning in Mobile Edge Networks: A Comprehensive Survey," IEEE Communications Surveys Tutorials,
449
+
450
+ vol. 22, no. 3, pp. 2031-2063, 2020.
451
+ [17] M. Aledhari, R. Razzak, R. M. Parizi, and F. Saeed, "Federated Learning: A Survey on Enabling Technologies, Protocols, and Applications," IEEE Access, vol. 8, pp. 140699-140725, 2020.
452
+ [18] A. Imteaj, U. Thakker, S. Wang, J. Li, and M. H. Amini, "A Survey on Federated Learning for Resource-Constrained IoT Devices," IEEE Internet of Things Journal, pp. 1-1, 2021.
453
+ [19] A. M. Elbir, B. Soner, and S. Coleri, “Federated Learning in Vehicular Networks,” arXiv:2006.01412 [cs, eess, math], Sep. 2020, arXiv: 2006.01412. [Online]. Available: http://arxiv.org/abs/2006.01412
454
+ [20] M. Azizian et al., "Vehicle software updates distribution with sdn and cloud computing," IEEE Communications Magazine, vol. 55, no. 8, pp. 74-79, 2017.
455
+ [21] ——, "An optimized flow allocation in vehicular cloud," IEEE Access, vol. 4, pp. 6766-6779, 2016.
456
+ [22] I. Jabri, T. Mekki, A. Rachedi, and M. Ben Jemaa, "Vehicular fog gateways selection on the internet of vehicles: A fuzzy logic with ant colony optimization based approach," Ad Hoc Networks, vol. 91, p. 101879, Aug. 2019. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1570870518308096
457
+ [23] I. Al Ridhawi, M. Aloqaily, B. Kantarci, Y. Jararweh, and H. T. Mouftah, "A continuous diversified vehicular cloud service availability framework for smart cities," Computer Networks, vol. 145, pp. 207-218, Nov. 2018. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1389128618308430
458
+ [24] I. Tal and G.-M. Muntean, "Towards Smarter Cities and Roads: A Survey of Clustering Algorithms in VANETs." IGI Global, 2014, pp. 16-50, ISBN: 9781466659780.
459
+ [25] C. Cooper, D. Franklin, M. Ros, F. Safaei, and M. Abolhasan, "A Comparative Survey of VANET Clustering Techniques," IEEE Communications Surveys Tutorials, vol. 19, no. 1, pp. 657-681, 2017.
460
+ [26] M. Azizian et al., "A distributed d-hop cluster formation for vanet," in 2016 IEEE Wireless Communications and Networking Conference, 2016, pp. 1-6.
461
+ [27] ——, “A distributed cluster based transmission scheduling in vanet,” in 2016 IEEE International Conference on Communications (ICC), 2016, pp. 1-6.
462
+ [28] D. Singh, Ranvijay, and R. S. Yadav, "NWCA: A New Weighted Clustering Algorithm to form Stable Cluster in VANET," in Proceedings of the Second International Conference on Information and Communication Technology for Competitive Strategies, ser. ICTCS '16. New York, NY, USA: Association for Computing Machinery, Mar. 2016, pp. 1-6. [Online]. Available: https://doi.org/10.1145/2905055.2905226
463
+ [29] A. Daeinabi, A. G. Pour Rahbar, and A. Khademzadeh, "VWCA: An efficient clustering algorithm in vehicular ad hoc networks," Journal of Network and Computer Applications, vol. 34, no. 1, pp. 207-222, Jan. 2011. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1084804510001384
464
+ [30] Y. Kim, E. A. Hakim, J. Haraldson, H. Eriksson, J. M. B. d. Silva Jr., and C. Fischione, "Dynamic Clustering in Federated Learning," arXiv:2012.03788 [cs], Dec. 2020, arXiv: 2012.03788. [Online]. Available: http://arxiv.org/abs/2012.03788
465
+ [31] A. Ghosh, J. Chung, D. Yin, and K. Ramchandran, "An Efficient Framework for Clustered Federated Learning," arXiv:2006.04088 [cs, stat], Jun. 2021, arXiv: 2006.04088. [Online]. Available: http://arxiv.org/abs/2006.04088
466
+ [32] P. Kairouz et al., "Advances and Open Problems in Federated Learning," arXiv:1912.04977 [cs, stat], Mar. 2021, arXiv:1912.04977. [Online]. Available: http://arxiv.org/abs/1912.04977
467
+ [33] A. Taik and S. Cherkaoui, "Federated Edge Learning: Design Issues and Challenges," IEEE Network, vol. 35, no. 2, pp. 252-258, Mar. 2021.
468
+ [34] Y. Mansour, M. Mohri, J. Ro, and A. T. Suresh, “Three Approaches for Personalization with Applications to Federated Learning,” arXiv:2002.10619 [cs, stat], Jul. 2020, arXiv:2002.10619. [Online]. Available: http://arxiv.org/abs/2002.10619
469
+ [35] Z. Chen, P. Tian, W. Liao, and W. Yu, "Zero Knowledge Clustering Based Adversarial Mitigation in Heterogeneous Federated Learning," IEEE Transactions on Network Science and Engineering, vol. 8, no. 2, pp. 1070-1083, Apr. 2021.
470
+ [36] A. Taik, H. Moudoud, and S. Cherkaoui, "Data-Quality Based Scheduling for Federated Edge Learning," in 2021 IEEE 46th Conference on Local Computer Networks (LCN), Oct. 2021, pp. 17-23, ISSN: 0742-1303.
471
+
472
+ [37] L. Liu, J. Zhang, S. Song, and K. B. Letaief, "Client-Edge-Cloud Hierarchical Federated Learning," in ICC 2020 - 2020 IEEE International Conference on Communications (ICC), Jun. 2020, pp. 1-6, ISSN: 1938-1883.
473
+ [38] H. Chai, S. Leng, Y. Chen, and K. Zhang, "A Hierarchical Blockchain-Enabled Federated Learning Algorithm for Knowledge Sharing in Internet of Vehicles," IEEE Transactions on Intelligent Transportation Systems, vol. 22, no. 7, pp. 3975-3986, Jul. 2021.
474
+ [39] A. Taik, Z. Mlika, and S. Cherkaoui, "Data-Aware Device Scheduling for Federated Edge Learning," IEEE Transactions on Cognitive Communications and Networking, pp. 1-1, 2021.
475
+ [40] M. Ren, J. Zhang, L. Khoukhi, H. Labiod, and V. Véque, "A Unified Framework of Clustering Approach in Vehicular Ad Hoc Networks," IEEE Transactions on Intelligent Transportation Systems, vol. 19, no. 5, pp. 1401-1414, May 2018.
476
+ [41] W. Li, A. Tizghadam, and A. Leon-Garcia, "Robust clustering for connected vehicles using local network criticality," in 2012 IEEE International Conference on Communications (ICC), Jun. 2012, pp. 7157-7161, ISSN: 1938-1883.
477
+ [42] M. Ren, L. Khoukhi, H. Labiod, J. Zhang, and V. Véque, "A mobility-based scheme for dynamic clustering in vehicular ad-hoc networks (VANETs)," Vehicular Communications, vol. 9, pp. 233-241, 2017. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S2214209616300699
478
+ [43] D. Pisinger, "Where are the hard knapsack problems?" Computers & Operations Research, vol. 32, no. 9, pp. 2271-2284, Sep. 2005. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S030505480400036X
479
+ [44] T. H. Cormen, C. E. Leiserson, R. L. Rivest, and C. Stein, Introduction to Algorithms. MIT Press, Jul. 2009.
480
+ [45] C. Chen, S. Chester, V. Srinivasan, K. Wu, and A. Thomo, "Group-Aware Weighted Bipartite B-Matching," in Proceedings of the 25th ACM International on Conference on Information and Knowledge Management, ser. CIKM '16. New York, NY, USA: Association for Computing Machinery, Oct. 2016, pp. 459-468. [Online]. Available: https://doi.org/10.1145/2983323.2983770
481
+ [46] F. Ahmed, J. P. Dickerson, and M. Fuge, "Diverse weighted bipartite b-matching," Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence, Aug 2017. [Online]. Available: http://dx.doi.org/10.24963/ijcai.2017/6
482
+ [47] "Optimization with PuLP — PuLP 2.5.0 documentation." [Online]. Available: https://coin-or.github.io/pulp/
483
+ [48] "PyTorch." [Online]. Available: https://www.pytorch.org
484
+ [49] Y. Lecun, L. Bottou, Y. Bengio, and P. Haffner, "Gradient-based learning applied to document recognition," Proceedings of the IEEE, vol. 86, no. 11, pp. 2278-2324, Nov. 1998.
485
+ [50] H. Xiao, K. Rasul, and R. Vollgraf, "Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms," arXiv:1708.07747 [cs, stat], Sep. 2017, arXiv:1708.07747. [Online]. Available: http://arxiv.org/abs/1708.07747
486
+ [51] V. Tolpegin, S. Truex, M. E. Gursoy, and L. Liu, "Data Poisoning Attacks Against Federated Learning Systems," arXiv:2007.08432 [cs, stat], Aug. 2020, arXiv: 2007.08432. [Online]. Available: http://arxiv.org/abs/2007.08432
487
+ [52] L. Liang, G. Y. Li, and W. Xu, "Resource Allocation for D2D-Enabled Vehicular Communications," IEEE Transactions on Communications, vol. 65, no. 7, pp. 3186-3197, Jul. 2017.
488
+ [53] H. Moudoud, S. Cherkaoui, and L. Khoukhi, "Towards a scalable and trustworthy blockchain: IoT use case," in ICC 2021 - IEEE International Conference on Communications, 2021, pp. 1-6.
2201.11xxx/2201.11271/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53bfa5b555a3a634190ffec2047feb64414ba1c03b392c68efa3c7a35795e19b
3
+ size 582012
2201.11xxx/2201.11271/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11279/beaafe54-2b93-4ee7-a89d-737420e9e531_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11279/beaafe54-2b93-4ee7-a89d-737420e9e531_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11279/beaafe54-2b93-4ee7-a89d-737420e9e531_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d239176c7574b03e5a086fed24f356ae30edd841bc1f971cd49e14bbb32b3307
3
+ size 3838234
2201.11xxx/2201.11279/full.md ADDED
@@ -0,0 +1,487 @@
1
+ # Revisiting RCAN: Improved Training for Image Super-Resolution
2
+
3
+ Zudi Lin $^{1\dagger}$ Prateek Garg $^{2*}$ Atmadeep Banerjee $^{2*}$ Salma Abdel Magid $^{1}$ Deqing Sun $^{3}$
4
+
5
+ Yulun Zhang $^{4}$ Luc Van Gool $^{4}$ Donglai Wei $^{5}$ Hanspeter Pfister $^{1}$
6
+
7
+ $^{1}$ Harvard University $^{2}$ BITS Pilani $^{3}$ Google $^{4}$ ETH Zürich $^{5}$ Boston College
8
+
9
+ # Abstract
10
+
11
+ Image super-resolution (SR) is a fast-moving field in which novel architectures attract the spotlight. However, most SR models were optimized with dated training strategies. In this work, we revisit the popular RCAN model and examine the effect of different training options in SR. Surprisingly (or perhaps as expected), we show that with a proper training strategy and minimal architecture changes, RCAN can outperform or match nearly all the CNN-based SR architectures published after it on standard benchmarks. Moreover, although RCAN is a very large SR architecture with more than four hundred convolutional layers, we draw the notable conclusion that underfitting, rather than overfitting, is still the main problem restricting model capability. We observe supportive evidence that increasing training iterations clearly improves the model performance, while applying regularization techniques generally degrades the predictions. We denote our simply revised RCAN as RCAN-it and recommend practitioners use it as a baseline for future research. Code is publicly available at https://github.com/zudi-lin/rcan-it.
12
+
13
+ # 1. Introduction
14
+
15
+ Image super-resolution (SR) is a basic computer vision task that focuses on reconstructing high-resolution (HR) details from corresponding low-resolution (LR) images. The frontier of image SR research has been significantly pushed forward with deep neural networks [6-9,22,24,50,52]. For the past several years, the field has put spotlights mostly on novel network architectures.
16
+
17
+ However, new architectures optimized with improved training strategies are usually compared with earlier models trained under dated protocols. The importance of training strategies, which contribute collaboratively to the performance, is rarely explored. Recent works on image recognition [2, 16] have demonstrated that without or with min-
18
+
19
+ ![](images/1caef85a67136507e98b40c290499dac501972b9abc6f39021dd6612fd5e258b.jpg)
20
+ Figure 1. Improving SR with better training. A proper training strategy and minimal architecture change can significantly improve the performance of RCAN [50] and match the empirical gain from novel architectures. Year $(x$ -axis) is a rough estimation.
21
+
22
+ Table 1. Quantitative comparison between original RCAN [50] and our RCAN with improved training (RCAN-it). Results are evaluated by the PSNR (dB) metric for two SR scales.
23
+
24
+ <table><tr><td>Method</td><td></td><td>Set5</td><td>Set14</td><td>B100</td><td>Urban100</td><td>Manga109</td></tr><tr><td>RCAN [50]</td><td rowspan="2">x2</td><td>38.27</td><td>34.12</td><td>32.41</td><td>33.34</td><td>39.44</td></tr><tr><td>RCAN-it</td><td>38.37</td><td>34.49</td><td>32.48</td><td>33.62</td><td>39.88</td></tr><tr><td>RCAN [50]</td><td rowspan="2">x4</td><td>32.63</td><td>28.87</td><td>27.77</td><td>26.82</td><td>31.22</td></tr><tr><td>RCAN-it</td><td>32.69</td><td>28.99</td><td>27.87</td><td>27.16</td><td>31.78</td></tr></table>
25
+
26
+ imal change to "traditional" neural network architectures like ResNet [15], those models can match or surpass the performance of novel architectures proposed after by just improving the training and regularization techniques. Similar observations were also indicated in optical flow estimation [41, 42]. Such evidence motivates us to interrogate the training strategies for "traditional" SR architectures for better understanding the sources of empirical gains [28] in this field and unlocking their potential.
27
+
28
+ To this end, we survey modern training and regularization strategies widely in use for other vision tasks and focus on improving the performance of the Residual Channel Attention Network (RCAN) [50], which was released more than three years ago and became a popular architec
29
+
30
+ ture in the SR field. RCAN is a very deep architecture with more than 400 convolutional layers. However, the potential of this very deep architecture can be hindered by dated training protocols in the original version. Surprisingly (or perhaps as expected according to the observations in image recognition and optical flow estimation), RCAN can outperform or match nearly all subsequent CNN-based approaches that claimed superior performance with a proper training scheme. We denote our improved version of RCAN as RCAN-it (it stands for improved training). We show the comparison with state-of-the-art SR approaches in Figure 1 and the direct improvement upon RCAN paper results in Table 1. Specifically, our RCAN-it achieves a PSNR of 39.88 dB for $\times 2$ SR on Manga109 [35], which is an absolute improvement of 0.44 dB over RCAN [50]. The score is better or comparable to the latest works, including CRAN [53] and SwinIR [26]. With self-ensemble inference, the PSNR is further increased to 40.04 dB, outperforming all existing approaches with or without self-ensemble.
31
+
32
+ We conduct comprehensive ablation studies from the training perspective and have several notable results. First, although RCAN is a huge SR architecture, we notice that the model performance is currently restricted by underfitting instead of overfitting when training on the widely used DF2K dataset [27]. We draw this conclusion based on the observations that simply increasing the training iterations can clearly improve the prediction scores, while regularization techniques effective for image recognition models generally decrease the SR model performance<sup>1</sup>. Second, using a large-batch training scheme with state-of-the-art optimizers and learning-rate scheduling rules, the model can already match the results of the original training protocol with $4 \times$ less training time. Third, a simple architecture modification that changes ReLU nonlinearity to SiLU [18, 39] achieves similar improvement as training the baseline model $2 \times$ longer. Fourth, with warm-start that initializes SR networks with different scales using the pretrained $\times 2$ model, we can further save the training time and cost for $\times 3$ and $\times 4$ models while still achieving competitive performance. We also conducted an oracle study on the benchmark sets and showed that there is still large space for improvement before reaching the upper bound of the RCAN architecture, shedding light on future directions like diversifying training data and domain adaptation.
33
+
34
+ To summarize, our work makes three main contributions. First, different from recent image SR publications that emphasize architectures, we investigate the optimization aspect of this low-level vision task and identify the underfitting problem in deep SR networks. Second, with an effective large-batch training framework, updated learning protocol, and minimal architectural modification, our RCAN-it
35
+
36
+ outperforms or matches the latest approaches released more than three years later than RCAN [50]. Third, our comprehensive empirical studies demonstrate the interplay of different training strategies and provide practitioners with valuable information for future research.
37
+
38
+ # 2. Related Work
39
+
40
+ Image super-resolution. Image super-resolution is a classic low-level computer vision problem [12]. Deep neural networks have become the de facto methodology for image SR in the past several years thanks to their ability to model the end-to-end mapping between LR and HR images. Starting from the pioneering work where the model is a simple three-layer CNN [8], developing better architectures has become the central topic in this field. Subsequent improvements are achieved by increasing the depth and width of models [22], applying residual [27, 50] and dense [52] connections, as well as introducing different channel and spatial attention techniques [6, 7, 33, 37, 50]. Recent work even gets rid of CNN-based architectures and utilizes a transformer [44] (specifically a Swin Transformer [31]) for image restoration [26]. We champion the success of better SR architectures, but from a machine learning perspective, good predictions result from the interplay of architecture, training data, and optimization strategies. Previous works clearly demonstrate that image SR can benefit from better architectures originally proposed for high-level vision tasks [15, 19, 31], but pay less attention to the training strategies that achieve comparable or even larger empirical gains than architectures [2, 16]. Therefore, we extensively investigate the rarely interrogated training strategies in the SR field and show good practice for training "traditional" deep SR architectures like RCAN [50].
41
+
42
+ Training and regularization strategies. Image recognition literature has demonstrated that a proper training strategy can effectively decrease the training time using parallelism [13, 46] and increase the performance with better optimization strategies and regularization techniques [2, 16]. In large-batch training, the learning rate needs to be scaled (usually linearly with the batch size [13]) to compensate for the reduced number of parameter updates. Some works use warm-up [13, 16] to avoid the sudden increase of learning rate for training stability. Optimizers like LAMB [47] improve upon Adam [23] with a layer-wise adaptive mechanism. We adopt large-batch training for RCAN [50] in image SR to effectively decrease the training time while achieving comparable performance as the original protocol.
43
+
44
+ Regularizations like weight decay, stronger data augmentations [4], mixup [49] and stochastic depth [20] usually boost the accuracy of deep classification models. We thus study their effects on image SR, a basic low-level vision topic. Different from image recognition where the
45
+
46
+ main challenge is overfitting (e.g., training ResNet [15] for more epochs decreases the classification accuracy [2]), we show evidence that the performance of deep SR models [50] is restricted by underfitting, demonstrating the unique characteristics of different computer vision fields. We thus provide a practical training strategy for SR models.
47
+
48
+ # 3. Methodology
49
+
50
+ In this section, we first revisit the base RCAN architecture and show a small modification to the activation function (Sec. 3.1). We then discuss the training and regularization strategies tested throughout the paper (Sec. 3.2).
51
+
52
+ # 3.1. Architecture
53
+
54
+ The Residual Channel Attention Network (RCAN) [50] is a popular SR architecture that features three main improvements upon previous work like EDSR [27]. First, it uses a squeeze-and-excitation (or channel-attention) block [19] after the second $3 \times 3$ convolution layer in each residual block to re-weight the importance of different channels. Second, it has a novel residual-in-residual design with long skip connections over multiple residual blocks to bypass low-frequency information and facilitate better learning of high-frequency details. Third, RCAN is a very deep architecture with a large model capacity as it has 200 residual blocks and more than 400 convolutional layers. In this work, we keep the RCAN architecture mostly untouched. The only modification we tested from the architectural perspective is to substitute the original ReLU activation with the Sigmoid Linear Unit (SiLU) [18] (also known as Swish [39]) activation function:
55
+
56
+ $$
57
+ f(x) = x \cdot \sigma(x), \tag{1}
58
+ $$
59
+
60
+ where $\sigma(x)$ is the sigmoid function. Different from ReLU, SiLU is a smooth, non-monotonic function. Previous results [39] show consistent improvement of SiLU over ReLU on image recognition benchmarks but have not yet explored its impact for low-level vision tasks like image SR.
61
+
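+ In an existing RCAN implementation, this change amounts to swapping activation modules; a minimal PyTorch sketch (ours, not the authors' code):
+
+ ```python
+ import torch.nn as nn
+
+ def relu_to_silu(module: nn.Module) -> nn.Module:
+     """Recursively replace every ReLU with SiLU, f(x) = x * sigmoid(x)."""
+     for name, child in module.named_children():
+         if isinstance(child, nn.ReLU):
+             setattr(module, name, nn.SiLU(inplace=True))
+         else:
+             relu_to_silu(child)
+     return module
+ ```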
62
+ # 3.2. Training Strategies
63
+
64
+ Large-batch optimization. The original RCAN was trained with the Adam [23] optimizer, a small batch size (BS) of 16, and a small learning rate $(\eta)$ of $10^{-4}$ . The consequence is slow convergence, taking about 7 days on a single GPU device (Table 2, 1st row). Therefore, our first improvement over the original training protocol is to enable large-batch training over multiple GPUs for faster convergence. Since the total number of gradient updates decreases, we apply a linear scaling rule: when we multiply the minibatch size by $k$ , the learning rate is also multiplied by $k$ . For training stability, we use Lamb [47], a layerwise adaptive
65
+
66
+ optimizer designed for large-batch training. We also substitute the original multi-step learning rate scheduler with cosine annealing [32], whose only hyper-parameter is the total number of iterations (or epochs). By employing these techniques, we effectively increase the batch size by $16\times$ and largely reduce the training time through parallelism.
67
+
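+ A minimal PyTorch sketch of the resulting schedule; Lamb is not in core PyTorch, so Adam stands in for it here, and the tiny linear model with a dummy loss is a placeholder for RCAN and its $\ell_1$ objective:
+
+ ```python
+ import torch
+
+ # Linear scaling rule: growing the batch from 16 to 256 multiplies
+ # the base learning rate (2e-4 here, see Table 2) by 16.
+ base_lr, base_bs, batch_size = 2e-4, 16, 256
+ lr = base_lr * batch_size / base_bs                      # 0.0032
+
+ model = torch.nn.Linear(8, 8)                            # placeholder for RCAN
+ optimizer = torch.optim.Adam(model.parameters(), lr=lr)  # stand-in for Lamb
+ scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
+     optimizer, T_max=80_000)                             # anneal over all iterations
+
+ for step in range(80_000):
+     optimizer.zero_grad()
+     loss = model(torch.randn(batch_size, 8)).abs().mean()  # dummy l1-style loss
+     loss.backward()
+     optimizer.step()
+     scheduler.step()
+ ```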
68
+ Longer training. We observe supportive evidence that the validation performance of RCAN is still increasing by the end of the baseline training protocol (Fig. 2a), indicating the underfitting problem. Therefore, we apply a straightforward but effective technique, increasing the number of training iterations, to alleviate the challenge of underfitting. Unlike image classification results where longer training decreases the performance [2] due to overfitting, we show in experiments that such a strategy effectively improves the performance of deep SR models like RCAN.
69
+
70
+ Large-patch finetuning. Previous results suggest that training with larger patches improves the performance [26]. However, increasing input height and width under the large-batch setting will quadratically increase the GPU memory usage, making the training costly or even infeasible under a hardware budget. Therefore we design a two-stage training strategy where the model is first optimized with standard patch size $(48 \times 48)$ as RCAN [50]. We then finetune the model with larger patches $(64 \times 64)$ for a smaller number of iterations to enjoy the benefit of a larger input field-of-view (batch size is reduced accordingly to fit GPU memory).
71
+
72
+ Low-precision training. In popular deep learning frameworks like PyTorch [38], neural networks are optimized with 32-bit float point (FP32) precision. Existing work for image recognition indicates that low-precision training (reduces data, model parameters, and gradients to FP16 precision) can preserve or even slightly improve the original model accuracy with significantly decreased training time and GPU memory usage [16]. We therefore also examine this technique with RCAN and show that low-precision (FP16) training has different behaviors in image SR.
73
+
74
+ Regularization techniques. Besides improving training strategies, we also tested regularization techniques widely used for image recognition, including stronger data augmentations, mixup [49] and stochastic depth [20]. For augmentations, we add random invert and channel shuffle. For mixup we use $\mathrm{Beta}(0.15, 0.15)$ to produce random interpolation weights. For stochastic depth, we randomly skip a residual block with a probability of 0.5. Although adding regularizations can effectively boost the performance of large image recognition models [2, 20], we show in experiments that RCAN does not benefit from regularizations as it suffers from underfitting instead of overfitting (Sec.4.2).
75
+
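+ Note that for SR, mixup must blend inputs and targets with the same weight so the LR-HR correspondence is preserved; a minimal sketch under that assumption:
+
+ ```python
+ import numpy as np
+ import torch
+
+ def mixup_sr(lr_batch, hr_batch, alpha=0.15):
+     """Blend each LR/HR pair with a randomly permuted pair using a
+     single weight drawn from Beta(0.15, 0.15), as stated above."""
+     lam = float(np.random.beta(alpha, alpha))
+     perm = torch.randperm(lr_batch.size(0))
+     lr_mix = lam * lr_batch + (1 - lam) * lr_batch[perm]
+     hr_mix = lam * hr_batch + (1 - lam) * hr_batch[perm]
+     return lr_mix, hr_mix
+ ```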
76
+ Warm start. For different SR scales, $\times k, k \in \{2, 3, 4\}$ , most RCAN layers have identical structures except the tail module that contains unique convolution and pixel
77
+
78
+ Table 2. Baseline results (PSNR) for $\times 2$ SR. We train RCAN [50] on DF2K with the original protocol (1st row) and our large-batch training strategies (2nd and 3rd rows). The highlighted setting matches the original strategy and reduces the training time by $77\%$ , which is used as the baseline for further improvements.
79
+
80
+ <table><tr><td>BS, η</td><td>Time</td><td>Set5</td><td>Set14</td><td>B100</td><td>Urban100</td></tr><tr><td>16, 0.0001</td><td>7 Days</td><td>38.35</td><td>34.33</td><td>32.48</td><td>33.59</td></tr><tr><td>256, 0.0016</td><td>1.6 Days</td><td>38.34</td><td>34.35</td><td>32.45</td><td>33.52</td></tr><tr><td>256, 0.0032</td><td>1.6 Days</td><td>38.35</td><td>34.42</td><td>32.46</td><td>33.61</td></tr></table>
81
+
82
+ ![](images/c8be479992a3485ec9d37bdc24612d7c42ab5af22bd411f7cb046fcc8bde2b28.jpg)
83
+ Figure 2. Validation curve of baseline RCAN. We show (a) $\times 2$ PSNR on the DF2K validation set and (b) learning rate schedule for the highlighted baseline in Table 2. The PSNR performance is still increasing by the end of the training, indicating underfitting.
84
+
85
+ ![](images/d4d9f0ba5adc02913fadd943b4a3c07b90179e2f2abfdbb42e202c78376bc416.jpg)
86
+
87
+ shuffle [40] layers. Therefore, given a pretrained $\times 2$ model, we can directly transfer the weights to the $\times 3$ and $\times 4$ models instead of training them from scratch, which we denote as warm start. Since the tail modules are not shared, we first finetune only the randomly initialized tail module, with the other layers frozen, until convergence, which takes $< 1$ hour. We then finetune the whole model for only $50\%$ of the normal iterations to save time and cost.
88
+
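+ One way to implement the warm start is sketched below; it assumes the checkpoint is a plain `state_dict` and treats every parameter that cannot be copied (the scale-specific tail) as the part to finetune first:
+
+ ```python
+ import torch
+
+ def warm_start(model, ckpt_x2_path):
+     """Initialize a x3/x4 model from a pretrained x2 checkpoint: copy every
+     weight whose name and shape match, leave the tail at its random init,
+     and freeze the transferred body so only the tail trains in stage one."""
+     src = torch.load(ckpt_x2_path, map_location="cpu")
+     dst = model.state_dict()
+     shared = {k: v for k, v in src.items()
+               if k in dst and v.shape == dst[k].shape}
+     model.load_state_dict(shared, strict=False)   # tail keys stay random
+     for name, p in model.named_parameters():
+         p.requires_grad = name not in shared      # stage 1: tail only
+     return model
+ ```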
89
+ # 4. Experiments
90
+
91
+ We first describe the data, metrics and baseline models (Sec. 4.1). We then show the impacts of different training strategies (Sec. 4.2) and benchmark comparisons (Sec. 4.3).
92
+
93
+ # 4.1. Setup
94
+
95
+ Dataset and metric. Following latest publications [26, 33, 53], we use the DF2K dataset for training, which combines DIV2K [43] and Flickr2K [27] together to create a single training set with totally 3,550 images. As a common practice, we evaluate models on five standard benchmark datasets including Set5 [3], Set14 [48], B100 [34], Urban100 [21], and Manga109 [35]. For the metrics, we report the peak signal-to-noise ratio (PSNR) in all experiments, and also the structural similarity metric (SSIM) [45] in benchmark comparison. At evaluation time, the RGB images are first transformed into the YCbCr space, and the metrics are applied to the Y channel (i.e., luminance).
96
+
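+ A sketch of this evaluation convention; the BT.601 conversion below is the one commonly used in SR benchmarks, and any border cropping the exact protocol may apply is omitted:
+
+ ```python
+ import numpy as np
+
+ def psnr_y(sr, hr):
+     """PSNR (dB) on the luminance (Y) channel of two 8-bit RGB images
+     given as HxWx3 arrays."""
+     def to_y(img):
+         r, g, b = [img[..., c].astype(np.float64) for c in range(3)]
+         return 16 + (65.481 * r + 128.553 * g + 24.966 * b) / 255.0
+     mse = np.mean((to_y(sr) - to_y(hr)) ** 2)
+     return 10 * np.log10(255.0 ** 2 / mse)
+ ```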
97
+ Updated RCAN Baseline. We use the standard RCAN
98
+
99
+ model, which consists of ten residual groups (RG), where each RG contains 20 residual blocks and one convolution layer. The number of channels for all layers is fixed to 64, except for the input and upsampling layers. We use input patches of size $48 \times 48$ , as in RCAN. For simplicity and consistency among experiments, we only use the pixel-wise mean-absolute-error $(\ell_1)$ loss. Inputs are augmented with random horizontal and vertical flips as well as transposition.
100
+
101
+ Since inputs are uniformly cropped from large training images, we use iterations instead of epochs to denote the training length. The original strategy uses a batch size (BS) of 16 and a learning rate $(\eta)$ of $10^{-4}$ . The model is optimized with ADAM [23] $(\beta_{1} = 0.9, \beta_{2} = 0.99)$ for 1,725K iterations, and $\eta$ is halved every $20\%$ of the total iterations. In our large-batch scheme, we use a BS=256 and scale $\eta$ with the linear scaling rule. For stability, we change ADAM to the Lamb [47] optimizer. The original training scheme used a single Nvidia V100 GPU and finished the training in about 7 days. We use 4 V100 GPUs in our large-batch setting and finish the training in 1.6 days for 80K iterations. The system is implemented with PyTorch [38].
102
+
103
+ Table 2 shows that if we strictly follow the linear scaling rule and scale $\eta$ from $10^{-4}$ ( $0.0016 = \frac{256}{16} \cdot 10^{-4}$ ), there is a performance gap compared to the original protocol. If we instead scale $\eta$ from $2 \cdot 10^{-4}$ , the performance becomes comparable to the original setting while using only $23\%$ of the original training time. Therefore, we set BS=256, $\eta = 0.0032$ as the baseline for investigating the impact of other training techniques in the following experiments. Note that the original-setting results are better than the RCAN paper results [50] as they are obtained on the larger DF2K dataset, while the paper results are produced with DIV2K [43] only.
104
+
105
+ We also observe that training is unstable with large learning rates. When $\eta = 0.0064$ , the model produces undefined numbers (NaN) after $\sim 2\mathrm{K}$ iterations. For $\eta = 0.0048$ , the model produces NaN after $\sim 16\mathrm{K}$ iterations. Therefore with BS=256, increasing $\eta$ for faster convergence is not practicable. Techniques like gradient clipping may alleviate the problem, and we leave that for future exploration.
106
+
107
+ # 4.2. Impact of Training Strategies
108
+
109
+ Individual changes. We show the ablation studies of architecture modifications, different training strategies, and regularizations in Table 3. Changing ReLU to SiLU, longer training (training RCAN for 160K iterations, $2 \times$ the 80K-iteration baseline), and finetuning with larger patches ( $64 \times 64$ ) consistently improve the PSNR scores on all five benchmark datasets when applied independently to the baseline. The average improvements for these three updates are at least 0.042 dB. Notably, the observation that longer training and large-patch finetuning improve the test performance further supports our claim that current deep SR models like RCAN suffer from underfitting, and that these strate-
110
+
111
+ Table 3. Ablation studies of training options. The impact of architecture modifications, training strategies and regularizations are shown upon the updated RCAN baseline (highlighted in Table 2) for $\times 2$ SR. $^\dagger$ Longer training means $2\times$ the total iterations of baseline. $^\ddagger$ We finetune on larger patches instead of training from scratch to save computing resource. Results are produced with self-ensemble.
112
+
113
+ <table><tr><td rowspan="2">Configuration</td><td colspan="2">Set5</td><td colspan="2">Set14</td><td colspan="2">B100</td><td colspan="2">Urban100</td><td colspan="2">Manga109</td><td colspan="2">Average</td></tr><tr><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td></tr><tr><td>Baseline</td><td>38.350</td><td>-</td><td>34.415</td><td>-</td><td>32.463</td><td>-</td><td>33.610</td><td>-</td><td>39.866</td><td>-</td><td>35.741</td><td>-</td></tr><tr><td>w/ SiLU Activation</td><td>38.369</td><td>+0.019</td><td>34.463</td><td>+0.048</td><td>32.475</td><td>+0.012</td><td>33.669</td><td>+0.059</td><td>39.936</td><td>+0.070</td><td>35.783</td><td>+0.042</td></tr><tr><td>w/ Longer Training†</td><td>38.395</td><td>+0.045</td><td>34.438</td><td>+0.023</td><td>32.490</td><td>+0.028</td><td>33.656</td><td>+0.047</td><td>39.961</td><td>+0.095</td><td>35.788</td><td>+0.047</td></tr><tr><td>w/ Larger Patches‡</td><td>38.367</td><td>+0.017</td><td>34.473</td><td>+0.058</td><td>32.484</td><td>+0.021</td><td>33.687</td><td>+0.077</td><td>39.929</td><td>+0.063</td><td>35.788</td><td>+0.047</td></tr><tr><td>w/ FP16 Precision</td><td>38.360</td><td>+0.010</td><td>34.379</td><td>-0.036</td><td>32.450</td><td>-0.013</td><td>33.530</td><td>-0.080</td><td>39.812</td><td>-0.054</td><td>35.706</td><td>-0.035</td></tr><tr><td>w/ Color Augmentation</td><td>38.341</td><td>-0.009</td><td>34.414</td><td>-0.001</td><td>32.457</td><td>-0.006</td><td>33.592</td><td>-0.017</td><td>39.904</td><td>+0.038</td><td>35.742</td><td>+0.001</td></tr><tr><td>w/ Mixup</td><td>38.354</td><td>+0.004</td><td>34.328</td><td>-0.087</td><td>32.453</td><td>-0.009</td><td>33.532</td><td>-0.078</td><td>39.853</td><td>-0.013</td><td>35.704</td><td>-0.037</td></tr><tr><td>w/ Stochastic Depth</td><td>38.184</td><td>-0.166</td><td>33.902</td><td>-0.513</td><td>32.335</td><td>-0.128</td><td>32.760</td><td>-0.850</td><td>39.276</td><td>-0.590</td><td>35.292</td><td>-0.449</td></tr></table>
114
+
115
+ Table 4. Additive improvement of training options (×2 SR PSNR). Upon the updated RCAN baseline (highlighted in Table 2), combining SiLU and longer training can increase the average score by 0.1 dB. Fine-tuning on larger patches (64 × 64) further improves the performance. *Oracle denotes the model finetuned on the benchmark sets, which indicates the upper bound of RCAN.
116
+
117
+ <table><tr><td rowspan="2">Configuration</td><td colspan="2">Set5</td><td colspan="2">Set14</td><td colspan="2">B100</td><td colspan="2">Urban100</td><td colspan="2">Manga109</td><td colspan="2">Average</td></tr><tr><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td></tr><tr><td>Baseline</td><td>38.350</td><td>-</td><td>34.415</td><td>-</td><td>32.463</td><td>-</td><td>33.610</td><td>-</td><td>39.866</td><td>-</td><td>35.741</td><td>-</td></tr><tr><td>+ SiLU &amp; Train Longer</td><td>38.405</td><td>+0.055</td><td>34.527</td><td>+0.112</td><td>32.497</td><td>+0.034</td><td>33.747</td><td>+0.137</td><td>40.003</td><td>+0.137</td><td>35.836</td><td>+0.095</td></tr><tr><td>+ Larger Patches</td><td>38.406</td><td>+0.001</td><td>34.546</td><td>+0.019</td><td>32.511</td><td>+0.014</td><td>33.790</td><td>+0.043</td><td>40.041</td><td>+0.038</td><td>35.859</td><td>+0.023</td></tr><tr><td>Oracle*</td><td>39.273</td><td>+0.867</td><td>35.010</td><td>+0.464</td><td>32.622</td><td>+0.111</td><td>34.006</td><td>+0.216</td><td>40.321</td><td>+0.280</td><td>36.338</td><td>+0.388</td></tr></table>
118
+
119
+ gies are reasonable for alleviating the problem.
120
+
121
+ Unlike in image classification [16], low-precision (FP16) training and regularization techniques generally degrade the performance (Table 3, bottom half). Although FP16 training can reduce GPU memory usage by $50\%$ and training time by over $30\%$ , it decreases the average PSNR by 0.035 dB and sometimes outputs NaN even with the baseline learning rate. Stochastic depth, a strong regularization that works well for training very deep classification models [2, 20], decreases the SR scores significantly. We argue this is because randomly silencing residual blocks in a dense regression task makes a relatively large change in the loss, causing inaccurate gradient estimation. One exception among the regularizations is that, since Manga109 [35] consists of images from manga (comic books) while the training set contains mostly natural images, color augmentations have a positive effect on this benchmark. In summary, the results show that techniques with empirical gains in image recognition can behave differently in image super-resolution, demonstrating the uniqueness of each computer vision domain. We thus suggest SR researchers be careful with regularizations when training models on relatively large datasets like DF2K.
122
+
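+ To make the FP16 setting concrete, below is a minimal sketch of mixed-precision training with loss scaling in PyTorch [38]; the toy model, optimizer, and batch shapes are illustrative assumptions rather than our released code.
+
+ ```python
+ import torch
+
+ # Minimal mixed-precision (FP16) training step with loss scaling.
+ # The tiny model and random batch are placeholders, not the RCAN code.
+ model = torch.nn.Sequential(
+     torch.nn.Conv2d(3, 64, 3, padding=1),
+     torch.nn.SiLU(),
+     torch.nn.Conv2d(64, 3, 3, padding=1)).cuda()
+ optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
+ scaler = torch.cuda.amp.GradScaler()  # loss scaling guards against underflow
+
+ lr_batch = torch.rand(16, 3, 48, 48, device="cuda")  # low-res input patches
+ target = torch.rand(16, 3, 48, 48, device="cuda")    # matching targets
+
+ optimizer.zero_grad()
+ with torch.cuda.amp.autocast():  # forward pass runs in FP16 where safe
+     loss = torch.nn.functional.l1_loss(model(lr_batch), target)
+ scaler.scale(loss).backward()    # backward on the scaled loss
+ scaler.step(optimizer)           # the step is skipped if grads are inf/NaN
+ scaler.update()
+ ```
+
+ Even with loss scaling as in this sketch, NaN outputs of the kind noted above can still occur at the baseline learning rate, so a reduced learning rate may be required in practice.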
123
+ Additive improvement. Based on the individual results (Table 3), we keep the techniques that consistently improve model performance in our protocol and study their additive effect (Table 4). Longer training of the SiLU model boosts the average PSNR by $0.095\mathrm{dB}$, even better than adding up the individual improvements of the two techniques. Large-patch finetuning has a smaller effect than when starting from the baseline, but still consistently increases the scores and achieves a state-of-the-art PSNR of 40.04 dB on Manga109. So far, the best model is optimized for 200K iterations (160K training + 40K large-patch finetuning) on 4 V100 GPUs for 4 days. We believe additional training can further improve the scores (e.g., SwinIR [26] is optimized for more than 500K iterations).
126
+
127
+ We also show the oracle results to understand the upper bound of the RCAN architecture (Table 4, last line). The best $\times 2$ model is finetuned on each test set independently until convergence. Interestingly, the performance on the two smallest benchmark sets (i.e., Set5 and Set14) is farthest from saturation. The gap between the best model and the oracle (an average PSNR of $0.388\mathrm{dB}$) indicates that there is still large room for improvement even with the relatively "dated" RCAN architecture. Besides, Set5 [3] is known to have JPEG artifacts, while Manga109 [35] consists of comic images whose statistics differ significantly from the training data. Therefore, besides upgrading the model, we suggest that future research focus on diversifying training data, designing better optimization strategies, and testing domain adaptation to further narrow the performance gap.
128
+
129
+ Warm start. As described before, we start from a pretrained $\times 2$ SR model to initialize the weights of the $\times 3$ and $\times 4$ models as they share the training data (just with dif-
130
+
131
+ Table 5. Effect of finetuning from warm start for $\times 3$ and $\times 4$ SR. By finetuning only the tail module from a pretrained $\times 2$ architecture, the model can already match RCAN paper results [50]. Normal finetuning for 80K iterations (50% of the longer training settings) significantly boosts the performance. Large-patch finetuning still increases the scores, but the improvement is relatively marginal.
132
+
133
+ <table><tr><td rowspan="2">Configuration</td><td colspan="2">Set5</td><td colspan="2">Set14</td><td colspan="2">B100</td><td colspan="2">Urban100</td><td colspan="2">Manga109</td><td colspan="2">Average</td></tr><tr><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td></tr><tr><td>Warm Start (×3 SR)</td><td>34.878</td><td>-</td><td>30.749</td><td>-</td><td>29.377</td><td>-</td><td>29.237</td><td>-</td><td>34.883</td><td>-</td><td>31.825</td><td>-</td></tr><tr><td>+ Normal Finetuning</td><td>34.935</td><td>+0.057</td><td>30.847</td><td>+0.098</td><td>29.432</td><td>+0.055</td><td>29.546</td><td>+0.309</td><td>35.109</td><td>+0.226</td><td>31.974</td><td>+0.149</td></tr><tr><td>+ Larger Patches</td><td>34.940</td><td>+0.005</td><td>30.858</td><td>+0.011</td><td>29.439</td><td>+0.007</td><td>29.567</td><td>+0.021</td><td>35.132</td><td>+0.023</td><td>31.987</td><td>+0.013</td></tr><tr><td>Warm Start (×4 SR)</td><td>32.719</td><td>-</td><td>28.986</td><td>-</td><td>27.847</td><td>-</td><td>27.024</td><td>-</td><td>31.676</td><td>-</td><td>29.650</td><td>-</td></tr><tr><td>+ Normal Finetuning</td><td>32.805</td><td>+0.086</td><td>29.077</td><td>+0.091</td><td>27.909</td><td>+0.062</td><td>27.325</td><td>+0.301</td><td>32.014</td><td>+0.338</td><td>29.826</td><td>+0.176</td></tr><tr><td>+ Larger Patches</td><td>32.809</td><td>+0.004</td><td>29.081</td><td>+0.004</td><td>27.912</td><td>+0.003</td><td>27.338</td><td>+0.013</td><td>32.035</td><td>+0.021</td><td>29.835</td><td>+0.009</td></tr></table>
134
+
135
+ ![](images/c6d4d0833edfc9ad9900ca279dc15117a46158b316564dcdca37f9cff18c3ebc.jpg)
136
+ Urban100: img_044 $(\times 4)$
137
+
138
+ ![](images/5a05b0bb0e4b4de28618ad30d13bfb23b5075dc6e88aa2820dbff4a3375f9c6c.jpg)
139
+ HQ
140
+
141
+ ![](images/f7d6a27cbdbf2c0ebffd38297fca0912a60fa79d6f2c39125ca09bff46a9fba8.jpg)
142
+ Bicubic
143
+
144
+ ![](images/423df69e7167deda46c62b14bb40adab91c685afb717a2212ca23c2ceffeeeb9.jpg)
145
+ EDSR [27]
146
+
147
+ ![](images/c078b3ccd4419b4e20db411e80342a7bec4328729453275496abe96880d0f393.jpg)
148
+ RDN [52]
149
+
150
+ ![](images/3916a04ad255b3552d3625232e681a286be2c6a5db9009b9bacd09b7c61d0d23.jpg)
151
+ RCAN [50]
152
+
153
+ ![](images/b23f9731266f951930ec1f2d4979aa48f7e233f48a96c3a8c466b97da70050d7.jpg)
154
+ Urban100: img_067 $(\times 4)$
155
+
156
+ ![](images/7b46bceec6c579f5989ea8e20a6105f351ef2f2c42c7560a47a597e87ab0ce21.jpg)
157
+ HQ
158
+
159
+ ![](images/1563d82177bc890a6fc9e094e326379c7bc203bba0c61a573c0725c0c5aeb9c3.jpg)
160
+ Bicubic
161
+
162
+ ![](images/ae4cbbb6f4aec6982a2ab8d1eddfe9a7e93af7b0affb43698822ba75ce68d0d5.jpg)
163
+ EDSR [27]
164
+
165
+ ![](images/b632e7945c8282d0185d3bc7a2f30fb8abea2e5a6132be86d180f69735a51318.jpg)
166
+ RDN [52]
167
+
168
+ ![](images/baba237e4477450cd6815fb10b01b8ef46aeb49433a291b3acae3e6d76026f60.jpg)
169
+ RCAN [50]
170
+
171
+ ![](images/79ffdde4e786957bada2a350a196efda8ba00c1d89fe3b29b3486ea9b426ab27.jpg)
172
+ Urban100: img_078 (×4)
173
+
174
+ ![](images/3d029e6ad0b8b01e28c3d5ac3cd0d698246f5bad34f0f5fe407967cd28954544.jpg)
175
+
176
+ ![](images/38743c3c0e29942334fa37e40e6a5e71cb9a3d0c9e777ff17dce968a8aecb80d.jpg)
177
+
178
+ ![](images/4c1b118472abbbcfac8874d552afa2441c9897e8309ca5fc37538e2e6520bb47.jpg)
179
+
180
+ ![](images/3b1e1cac9dc724c468ca46660948f1c803b314fafcaac6062c73b89fd14b7ae4.jpg)
181
+
182
+ ![](images/65dc2f00d4176c0bd6cf4f66d1b97b06dc4b591664fda0bd7e0e72e5f8ad75f6.jpg)
183
+
184
+ Urban100: img_092 $(\times 4)$
185
+
186
+ ![](images/8491830029e20cb53fe52cb1747319e359de888c1b2c3451de3222e8be2fd457.jpg)
187
+ HQ
188
+ SRFBN [25]
189
+
190
+ ![](images/7a81fb3e0beb4a4763cbfe2eb9100a40cd15a22d1e415a95fb7b811465f3de02.jpg)
191
+ Bicubic
192
+ IGNN [54]
193
+
194
+ ![](images/96aee222ee11038e8963c06ab96841bc196b61360f0611c93b0b24af9ffbc09d.jpg)
195
+ EDSR [27]
196
+ CSNLN [36]
197
+
198
+ ![](images/522de7002900a0396e7985120ccaee20fb022d1ca54fce0a421a5c66cb07870c.jpg)
199
+ RDN [52]
200
+ RFANet [30]
201
+
202
+ ![](images/537f77c307affdfd82278103bc9ab88abca7501b5fb7e68391c9a95e276b2c10.jpg)
203
+ RCAN [50]
204
+ RCAN-it (ours)
205
+
206
+ ![](images/c9643a053641d60c45dda74a3fc23fb4e72b7ec1d2b418189741dbf1e9458517.jpg)
207
+ HQ
208
+
209
+ ![](images/b8d641208ff5b6764c5839ca5fe8138137d9ddadac4e736c6b027311f0e37802.jpg)
210
+ Bicubic
211
+
212
+ ![](images/b4d251503979cb80f8dab7156c5eee8ab0533999107fec893ef64daf54e9c88d.jpg)
213
+ EDSR [27]
214
+
215
+ ![](images/1c02407c8191483faf095b9073ac2f97ff51b19bd651391480e6efc2438185c2.jpg)
216
+ RDN [52]
217
+
218
+ ![](images/a09969a6824c561febc3226e9a7b490872b9aac4f76d3b373380d7a05c154405.jpg)
219
+ RCAN [50]
220
+
221
+ Urban100: img_092 $(\times 4)$
222
+
223
+ ![](images/a9e022e58eb823a193f911e51057760172a4a86ef9624010b20afa193eeeff68.jpg)
224
+ SRFBN [25]
225
+ Figure 3. Visual comparison $(\times 4)$ with large SR networks on the Urban100 [21] dataset. Our simply improved RCAN-it can better reconstruct high-frequency details than RCAN [50] and also compares favorably with previous state-of-the-art approaches.
226
+
227
+ ![](images/2efef39a6dd85b16520a00d754e9c42747a24b5e2e3416af6bd1ffb70879d9de.jpg)
228
+ IGNN [54]
229
+
230
+ ![](images/09513c203415a2e46f7c83b8af86704874f0517c0bef131918ec9099750395a5.jpg)
231
+ CSNLN [36]
232
+
233
+ ![](images/a86e33709163250dd2ed9e6d1de12c3b3cd3492685c94cc19a16c3006c91dc1d.jpg)
234
+ RFANet [30]
235
+
236
+ ![](images/756fea22b5d13363bb05409681002f3aac260feb3b42e0d305d34cf4e8ca631d.jpg)
237
+ RCAN-it (ours)
238
+
239
+ ferent down-sampling ratios), which can save training cost. By finetuning only the tail module while keeping the other layers frozen, the $\times 3$ and $\times 4$ models can already match, or come slightly below, the original results in the RCAN paper (Table 5). This shows that warm start is an effective and efficient way to construct models for different SR scales.
240
+
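+ To make the warm-start procedure concrete, below is a minimal sketch in PyTorch: all weight tensors shared with the pretrained $\times 2$ model are copied, and only the scale-specific tail is left trainable. The module name `tail` follows the EDSR-style code convention and should be treated as an assumption.
+
+ ```python
+ import torch
+
+ def warm_start(model, x2_ckpt_path, tail_prefix="tail"):
+     """Initialize a x3/x4 model from x2 weights, then freeze all but the tail.
+
+     Assumes `x2_ckpt_path` stores a plain state dict and that the
+     scale-specific upsampler is named `tail` (EDSR-style convention).
+     """
+     x2_state = torch.load(x2_ckpt_path, map_location="cpu")
+     own_state = model.state_dict()
+     # Copy every tensor whose name and shape match; the tail differs
+     # across scales (different PixelShuffle factors), so it is skipped.
+     compatible = {k: v for k, v in x2_state.items()
+                   if k in own_state and own_state[k].shape == v.shape}
+     own_state.update(compatible)
+     model.load_state_dict(own_state)
+
+     # Tail-only finetuning: freeze everything outside the tail module.
+     for name, param in model.named_parameters():
+         param.requires_grad = name.startswith(tail_prefix)
+     return model
+ ```
+
+ For the subsequent normal finetuning stage, all parameters are unfrozen again and the whole model is optimized.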
241
+ Finetuning the whole model for 80K iterations (50% of the longer training setting in Table 4) can boost the average performance by 0.149 dB for $\times 3$ SR and 0.176 dB for $\times 4$ SR. We also notice that large-patch finetuning still increases the SR scores for both scales (Table 5), but the improvement is relatively marginal compared with the $\times 2$ case. Our re-
242
+
243
+ Table 6. Quantitative comparison with CNN-based SR approaches. We show the PSNR (dB) and SSIM for $\times 2$ , $\times 3$ and $\times 4$ SR tasks on standard benchmark datasets. Our simply improved RCAN-it outperforms or matches existing CNN-based models on almost all metrics, demonstrating the practical value of the improved training strategies. Symbol + indicates results produced with self-ensemble at inference time (note that DFSA [33] only reports self-ensemble scores).
244
+
245
+ <table><tr><td rowspan="2">Method</td><td rowspan="2">Year</td><td rowspan="2">Scale</td><td colspan="2">Set5</td><td colspan="2">Set14</td><td colspan="2">B100</td><td colspan="2">Urban100</td><td colspan="2">Manga109</td></tr><tr><td>PSNR</td><td>SSIM</td><td>PSNR</td><td>SSIM</td><td>PSNR</td><td>SSIM</td><td>PSNR</td><td>SSIM</td><td>PSNR</td><td>SSIM</td></tr><tr><td>EDSR [27]</td><td>2017</td><td>×2</td><td>38.11</td><td>0.9602</td><td>33.92</td><td>0.9195</td><td>32.32</td><td>0.9013</td><td>32.93</td><td>0.9351</td><td>39.10</td><td>0.9773</td></tr><tr><td>DBPN [14]</td><td>2018</td><td>×2</td><td>38.09</td><td>0.9600</td><td>33.85</td><td>0.9190</td><td>32.27</td><td>0.9000</td><td>32.55</td><td>0.9324</td><td>38.89</td><td>0.9775</td></tr><tr><td>RDN [52]</td><td>2018</td><td>×2</td><td>38.24</td><td>0.9614</td><td>34.01</td><td>0.9212</td><td>32.34</td><td>0.9017</td><td>32.89</td><td>0.9353</td><td>39.18</td><td>0.9780</td></tr><tr><td>RCAN [50]</td><td>2018</td><td>×2</td><td>38.27</td><td>0.9614</td><td>34.12</td><td>0.9216</td><td>32.41</td><td>0.9027</td><td>33.34</td><td>0.9384</td><td>39.44</td><td>0.9786</td></tr><tr><td>NLRN [29]</td><td>2018</td><td>×2</td><td>38.00</td><td>0.9603</td><td>33.46</td><td>0.9159</td><td>32.19</td><td>0.8992</td><td>31.81</td><td>0.9249</td><td>-</td><td>-</td></tr><tr><td>RNAN [51]</td><td>2019</td><td>×2</td><td>38.17</td><td>0.9611</td><td>33.87</td><td>0.9207</td><td>32.31</td><td>0.9014</td><td>32.73</td><td>0.9340</td><td>39.23</td><td>0.9785</td></tr><tr><td>SRFBN [25]</td><td>2019</td><td>×2</td><td>38.11</td><td>0.9609</td><td>33.82</td><td>0.9196</td><td>32.29</td><td>0.9010</td><td>32.62</td><td>0.9328</td><td>39.08</td><td>0.9779</td></tr><tr><td>OISR [17]</td><td>2019</td><td>×2</td><td>38.21</td><td>0.9612</td><td>33.94</td><td>0.9206</td><td>32.36</td><td>0.9019</td><td>33.03</td><td>0.9365</td><td>-</td><td>-</td></tr><tr><td>SAN [6]</td><td>2019</td><td>×2</td><td>38.31</td><td>0.9620</td><td>34.07</td><td>0.9213</td><td>32.42</td><td>0.9028</td><td>33.10</td><td>0.9370</td><td>39.32</td><td>0.9792</td></tr><tr><td>CSNLN [36]</td><td>2020</td><td>×2</td><td>38.28</td><td>0.9616</td><td>34.12</td><td>0.9223</td><td>32.40</td><td>0.9024</td><td>33.25</td><td>0.9386</td><td>39.37</td><td>0.9785</td></tr><tr><td>RFANet [30]</td><td>2020</td><td>×2</td><td>38.26</td><td>0.9615</td><td>34.16</td><td>0.9220</td><td>32.41</td><td>0.9026</td><td>33.33</td><td>0.9389</td><td>39.44</td><td>0.9783</td></tr><tr><td>HAN [37]</td><td>2020</td><td>×2</td><td>38.27</td><td>0.9614</td><td>34.16</td><td>0.9217</td><td>32.41</td><td>0.9027</td><td>33.35</td><td>0.9385</td><td>39.46</td><td>0.9785</td></tr><tr><td>NSR [11]</td><td>2020</td><td>×2</td><td>38.23</td><td>0.9614</td><td>33.94</td><td>0.9203</td><td>32.34</td><td>0.9020</td><td>33.02</td><td>0.9367</td><td>39.31</td><td>0.9782</td></tr><tr><td>IGNNN [54]</td><td>2020</td><td>×2</td><td>38.24</td><td>0.9613</td><td>34.07</td><td>0.9217</td><td>32.41</td><td>0.9025</td><td>33.23</td><td>0.9383</td><td>39.35</td><td>0.9786</td></tr><tr><td>CRAN [53]</td><td>2021</td><td>×2</td><td>38.31</td><td>0.9617</td><td>34.22</td><td>0.9232</td><td>32.44</td><td>0.9029</td><td>33.43</td><td>0.9394</td><td>39.75</td><td>0.9793</td></tr><tr><td>DFSA+ [33]</td><td>2021</td><td>×2</td><td>38.38</td><td>0.9620</td><td>34.33</td><td>0.9232</td><td>32.50</td><td>0.9036</td><td>33.66</td><td>0.9412</td><td>39.98</td><td>0.9798</td></tr><tr><td rowspan="2">RCAN-it RCAN-it+</td><td 
rowspan="2">2021</td><td rowspan="2">×2</td><td>38.37</td><td>0.9620</td><td>34.49</td><td>0.9250</td><td>32.48</td><td>0.9034</td><td>33.62</td><td>0.9410</td><td>39.88</td><td>0.9799</td></tr><tr><td>38.41</td><td>0.9621</td><td>34.55</td><td>0.9254</td><td>32.51</td><td>0.9038</td><td>33.79</td><td>0.9421</td><td>40.04</td><td>0.9801</td></tr><tr><td>EDSR [27]</td><td>2017</td><td>×3</td><td>34.65</td><td>0.9280</td><td>30.52</td><td>0.8462</td><td>29.25</td><td>0.8093</td><td>28.80</td><td>0.8653</td><td>34.17</td><td>0.9476</td></tr><tr><td>RDN [52]</td><td>2018</td><td>×3</td><td>34.71</td><td>0.9296</td><td>30.57</td><td>0.8468</td><td>29.26</td><td>0.8093</td><td>28.80</td><td>0.8653</td><td>34.13</td><td>0.9484</td></tr><tr><td>RCAN [50]</td><td>2018</td><td>×3</td><td>34.74</td><td>0.9299</td><td>30.65</td><td>0.8482</td><td>29.32</td><td>0.8111</td><td>29.09</td><td>0.8702</td><td>34.44</td><td>0.9499</td></tr><tr><td>NLRN [29]</td><td>2018</td><td>×3</td><td>34.27</td><td>0.9266</td><td>30.16</td><td>0.8374</td><td>29.06</td><td>0.8026</td><td>27.93</td><td>0.8453</td><td>-</td><td>-</td></tr><tr><td>RNAN [51]</td><td>2019</td><td>×3</td><td>34.66</td><td>0.9290</td><td>30.53</td><td>0.8463</td><td>29.26</td><td>0.8090</td><td>28.75</td><td>0.8646</td><td>34.25</td><td>0.9483</td></tr><tr><td>SRFBN [25]</td><td>2019</td><td>×3</td><td>34.70</td><td>0.9292</td><td>30.51</td><td>0.8461</td><td>29.24</td><td>0.8084</td><td>28.73</td><td>0.8641</td><td>34.18</td><td>0.9481</td></tr><tr><td>OISR [17]</td><td>2019</td><td>×3</td><td>34.72</td><td>0.9297</td><td>30.57</td><td>0.8470</td><td>29.29</td><td>0.8103</td><td>28.95</td><td>0.8680</td><td>-</td><td>-</td></tr><tr><td>SAN [6]</td><td>2019</td><td>×3</td><td>34.75</td><td>0.9300</td><td>30.59</td><td>0.8476</td><td>29.33</td><td>0.8112</td><td>28.93</td><td>0.8671</td><td>34.30</td><td>0.9494</td></tr><tr><td>CSNLN [36]</td><td>2020</td><td>×3</td><td>34.74</td><td>0.9300</td><td>30.66</td><td>0.8482</td><td>29.33</td><td>0.8105</td><td>29.13</td><td>0.8712</td><td>34.45</td><td>0.9502</td></tr><tr><td>RFANet [30]</td><td>2020</td><td>×3</td><td>34.79</td><td>0.9300</td><td>30.67</td><td>0.8487</td><td>29.34</td><td>0.8115</td><td>29.15</td><td>0.8720</td><td>34.59</td><td>0.9506</td></tr><tr><td>HAN [37]</td><td>2020</td><td>×3</td><td>34.75</td><td>0.9299</td><td>30.67</td><td>0.8483</td><td>29.32</td><td>0.8110</td><td>29.10</td><td>0.8705</td><td>34.48</td><td>0.9500</td></tr><tr><td>NSR [11]</td><td>2020</td><td>×3</td><td>34.62</td><td>0.9289</td><td>30.57</td><td>0.8475</td><td>29.26</td><td>0.8100</td><td>28.83</td><td>0.8663</td><td>34.27</td><td>0.9484</td></tr><tr><td>IGNNN [54]</td><td>2020</td><td>×3</td><td>34.72</td><td>0.9298</td><td>30.66</td><td>0.8484</td><td>29.31</td><td>0.8105</td><td>29.03</td><td>0.8696</td><td>34.39</td><td>0.9496</td></tr><tr><td>CRAN [53]</td><td>2021</td><td>×3</td><td>34.80</td><td>0.9304</td><td>30.73</td><td>0.8498</td><td>29.38</td><td>0.8124</td><td>29.33</td><td>0.8745</td><td>34.84</td><td>0.9515</td></tr><tr><td>DFSA+ [33]</td><td>2021</td><td>×3</td><td>34.92</td><td>0.9312</td><td>30.83</td><td>0.8507</td><td>29.42</td><td>0.8128</td><td>29.44</td><td>0.8761</td><td>35.07</td><td>0.9525</td></tr><tr><td rowspan="2">RCAN-it RCAN-it+</td><td rowspan="2">2021</td><td 
rowspan="2">×3</td><td>34.86</td><td>0.9308</td><td>30.76</td><td>0.8505</td><td>29.39</td><td>0.8125</td><td>29.38</td><td>0.8755</td><td>34.92</td><td>0.9520</td></tr><tr><td>34.94</td><td>0.9313</td><td>30.84</td><td>0.8515</td><td>29.44</td><td>0.8133</td><td>29.57</td><td>0.8779</td><td>35.13</td><td>0.9528</td></tr><tr><td>EDSR [27]</td><td>2017</td><td>×4</td><td>32.46</td><td>0.8968</td><td>28.80</td><td>0.7876</td><td>27.71</td><td>0.7420</td><td>26.64</td><td>0.8033</td><td>31.02</td><td>0.9148</td></tr><tr><td>DBPN [14]</td><td>2018</td><td>×4</td><td>32.47</td><td>0.8980</td><td>28.82</td><td>0.7860</td><td>27.72</td><td>0.7400</td><td>26.38</td><td>0.7946</td><td>30.91</td><td>0.9137</td></tr><tr><td>RDN [52]</td><td>2018</td><td>×4</td><td>32.47</td><td>0.8990</td><td>28.81</td><td>0.7871</td><td>27.72</td><td>0.7419</td><td>26.61</td><td>0.8028</td><td>31.00</td><td>0.9151</td></tr><tr><td>RCAN [50]</td><td>2018</td><td>×4</td><td>32.63</td><td>0.9002</td><td>28.87</td><td>0.7889</td><td>27.77</td><td>0.7436</td><td>26.82</td><td>0.8087</td><td>31.22</td><td>0.9173</td></tr><tr><td>NLRN [29]</td><td>2018</td><td>×4</td><td>31.92</td><td>0.8916</td><td>28.36</td><td>0.7745</td><td>27.48</td><td>0.7306</td><td>25.79</td><td>0.7729</td><td>-</td><td>-</td></tr><tr><td>RNAN [51]</td><td>2019</td><td>×4</td><td>32.43</td><td>0.8977</td><td>28.83</td><td>0.7871</td><td>27.72</td><td>0.7410</td><td>26.61</td><td>0.8023</td><td>31.09</td><td>0.9149</td></tr><tr><td>SRFBN [25]</td><td>2019</td><td>×4</td><td>32.47</td><td>0.8983</td><td>28.81</td><td>0.7868</td><td>27.72</td><td>0.7409</td><td>26.60</td><td>0.8015</td><td>31.15</td><td>0.9160</td></tr><tr><td>OISR [17]</td><td>2019</td><td>×4</td><td>32.53</td><td>0.8992</td><td>28.86</td><td>0.7878</td><td>27.75</td><td>0.7428</td><td>26.79</td><td>0.8068</td><td>-</td><td>-</td></tr><tr><td>SAN [6]</td><td>2019</td><td>×4</td><td>32.64</td><td>0.9003</td><td>28.92</td><td>0.7888</td><td>27.78</td><td>0.7436</td><td>26.79</td><td>0.8068</td><td>31.18</td><td>0.9169</td></tr><tr><td>CSNLN [36]</td><td>2020</td><td>×4</td><td>32.68</td><td>0.9004</td><td>28.95</td><td>0.7888</td><td>27.80</td><td>0.7439</td><td>27.22</td><td>0.8168</td><td>31.43</td><td>0.9201</td></tr><tr><td>RFANet [30]</td><td>2020</td><td>×4</td><td>32.66</td><td>0.9004</td><td>28.88</td><td>0.7894</td><td>27.79</td><td>0.7442</td><td>26.92</td><td>0.8112</td><td>31.41</td><td>0.9187</td></tr><tr><td>HAN [37]</td><td>2020</td><td>×4</td><td>32.64</td><td>0.9002</td><td>28.90</td><td>0.7890</td><td>27.80</td><td>0.7442</td><td>26.85</td><td>0.8094</td><td>31.42</td><td>0.9177</td></tr><tr><td>NSR [11]</td><td>2020</td><td>×4</td><td>32.55</td><td>0.8987</td><td>28.79</td><td>0.7876</td><td>27.72</td><td>0.7414</td><td>26.61</td><td>0.8025</td><td>31.10</td><td>0.9145</td></tr><tr><td>IGNNN [54]</td><td>2020</td><td>×4</td><td>32.57</td><td>0.8998</td><td>28.85</td><td>0.7891</td><td>27.77</td><td>0.7434</td><td>26.84</td><td>0.8090</td><td>31.28</td><td>0.9182</td></tr><tr><td>CRAN [53]</td><td>2021</td><td>×4</td><td>32.72</td><td>0.9012</td><td>29.01</td><td>0.7918</td><td>27.86</td><td>0.7460</td><td>27.13</td><td>0.8167</td><td>31.75</td><td>0.9219</td></tr><tr><td>DFSA+ [33]</td><td>2021</td><td>×4</td><td>32.79</td><td>0.9019</td><td>29.06</td><td>0.7922</td><td>27.87</td><td>0.7458</td><td>27.17</td><td>0.8163</td><td>31.88</td><td>0.9266</td></tr><tr><td rowspan="2">RCAN-it RCAN-it+</td><td rowspan="2">2021</td><td 
rowspan="2">×4</td><td>32.69</td><td>0.9007</td><td>28.99</td><td>0.7922</td><td>27.87</td><td>0.7459</td><td>27.16</td><td>0.8168</td><td>31.78</td><td>0.9217</td></tr><tr><td>32.81</td><td>0.9021</td><td>29.08</td><td>0.7933</td><td>27.91</td><td>0.7469</td><td>27.34</td><td>0.8201</td><td>32.04</td><td>0.9233</td></tr></table>
246
+
247
+ Table 7. Quantitative comparison with SwinIR [26], a transformer-based SR model. PSNR (dB) and SSIM are shown for the $\times 2$ SR task on standard benchmark datasets. Inference times are benchmarked on an Nvidia V100 GPU for $\times 2$ SR with $256\times 256$ input patches. SwinIR-S denotes a lightweight version of the standard SwinIR model. Symbol + denotes self-ensemble results.
248
+
249
+ <table><tr><td rowspan="2">Methods</td><td rowspan="2">Time (Sec.)</td><td rowspan="2">Rel. RCAN</td><td colspan="2">Set5</td><td colspan="2">Set14</td><td colspan="2">B100</td><td colspan="2">Urban100</td><td colspan="2">Manga109</td></tr><tr><td>PSNR</td><td>SSIM</td><td>PSNR</td><td>SSIM</td><td>PSNR</td><td>SSIM</td><td>PSNR</td><td>SSIM</td><td>PSNR</td><td>SSIM</td></tr><tr><td>RCAN+ [50]</td><td>0.1645</td><td>1.00</td><td>38.33</td><td>0.9617</td><td>34.23</td><td>0.9225</td><td>32.46</td><td>0.9031</td><td>33.54</td><td>0.9399</td><td>39.61</td><td>0.9788</td></tr><tr><td>SwinIR+ [26]</td><td>0.7137</td><td>4.34</td><td>38.46</td><td>0.9624</td><td>34.61</td><td>0.9260</td><td>32.55</td><td>0.9043</td><td>33.95</td><td>0.9433</td><td>40.02</td><td>0.9800</td></tr><tr><td>SwinIR-S+ [26]</td><td>0.4747</td><td>2.89</td><td>38.19</td><td>0.9613</td><td>33.96</td><td>0.9211</td><td>32.34</td><td>0.9015</td><td>32.90</td><td>0.9349</td><td>39.23</td><td>0.9783</td></tr><tr><td>RCAN-it+ (Ours)</td><td>0.1655</td><td>1.01</td><td>38.41</td><td>0.9621</td><td>34.55</td><td>0.9254</td><td>32.51</td><td>0.9038</td><td>33.79</td><td>0.9421</td><td>40.04</td><td>0.9801</td></tr></table>
250
+
251
+ sults suggest that using warm start with finetuning instead of training from scratch can be a reasonable practice for real-world applications with a cost (time) budget.
252
+
253
+ # 4.3. Benchmark Comparison
254
+
255
+ So far, we have identified a proper training protocol to improve RCAN, which was released more than three years ago. In this section, we compare our RCAN-it with existing CNN-based and transformer-based SR models that have defined the state of the art in the past several years, and discuss the pros and cons of existing approaches.
256
+
257
+ Visual comparison. In a visual comparison (Fig. 3) of $\times 4$ SR on Urban100, we show that RCAN-it can reconstruct high-frequency details better than not only RCAN [50], but also more recent works including IGNN [54], CSNLN [36] and RFANet [30]. Specifically, RCAN-it reconstructs the strip patterns with higher contrast, while other approaches tend to hallucinate unrealistic artifacts (Fig. 3, 4th row). Besides, structures with low contrast against the background tend to be eliminated by previous models, while our RCAN-it preserves them (Fig. 3, 1st row). Please note that, except for the small modification of the activation function, our RCAN-it compares favorably with existing approaches in reconstructing high-frequency details purely through better training, without introducing new modules or structures designed specifically for learning high-frequency components.
258
+
259
+ Quantitative comparison. Table 6 shows a comprehensive quantitative comparison of RCAN-it with existing CNN-based SR approaches released in the past four years. The numbers are without self-ensemble by default. DFSA [33] only reports self-ensemble scores, so the fair comparison is DFSA+ vs. RCAN-it+. The quantitative results show that even against models released three years after RCAN, our simply improved RCAN-it can outperform or match their performance, with or without self-ensemble. Please note that we are not claiming that advances in SR architectures are unimportant. On the contrary, we champion the success of novel architectures that push the field forward. Our results show that similar or higher empirical improvement can be achieved by upgrading the training, and we believe the practical optimization solution we provided
260
+
261
+ can help the latest models shine in the field.
262
+
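+ For reference, the "+" (self-ensemble) results in Table 6 average predictions over the eight flip/rotation variants of the input, a geometric self-ensemble popularized by EDSR [27]. The sketch below illustrates the idea; it is an illustration rather than the exact released implementation.
+
+ ```python
+ import torch
+
+ @torch.no_grad()
+ def self_ensemble(model, lr):
+     """Geometric x8 self-ensemble: run the model on all flip/rotation
+     variants of the input and average after undoing each transform."""
+     outputs = []
+     for hflip in (False, True):
+         for rot in range(4):  # 0/90/180/270-degree rotations
+             x = torch.flip(lr, dims=[-1]) if hflip else lr
+             x = torch.rot90(x, k=rot, dims=[-2, -1])
+             y = model(x)
+             # Undo the transforms in reverse order.
+             y = torch.rot90(y, k=-rot, dims=[-2, -1])
+             y = torch.flip(y, dims=[-1]) if hflip else y
+             outputs.append(y)
+     return torch.stack(outputs).mean(dim=0)
+ ```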
263
+ Comparison with SwinIR. Finally, we compare with a concurrent work, SwinIR [26], which adapts the Swin Transformer [31] for image restoration (Table 7). SwinIR is the current state-of-the-art to the best of our knowledge, outperforming RCAN by $0.41\mathrm{dB}$ on both Urban100 and Manga109 for the $\times 2$ SR task. With an improved training strategy, our RCAN-it clearly narrows the gap between RCAN and SwinIR on Urban100 and even outperforms SwinIR on Manga109. Besides, we show that despite its impressive SR performance, SwinIR suffers from slow inference. When benchmarked on a single Nvidia V100 GPU, the standard version of SwinIR is $4.34\times$ slower than RCAN, and even the lightweight version, SwinIR-S, is still $2.89\times$ slower, with significantly decreased performance. Compared with SwinIR, the $1\%$ extra inference time due to SiLU in our RCAN-it is negligible. In real-world applications where SR quality and inference speed are weighted differently, RCAN-it remains a reasonable choice considering the quality-speed tradeoff.
264
+
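+ The inference times in Table 7 are obtained by timing the forward pass on fixed-size inputs. Below is a sketch of how such a GPU latency measurement can be set up; the warm-up and iteration counts are our assumptions.
+
+ ```python
+ import time
+ import torch
+
+ @torch.no_grad()
+ def benchmark(model, iters=50, warmup=10):
+     """Rough per-image GPU latency for x2 SR on a 256x256 input patch.
+     CUDA kernels launch asynchronously, so synchronize around timing."""
+     x = torch.randn(1, 3, 256, 256, device="cuda")
+     for _ in range(warmup):  # warm-up: cuDNN autotuning, memory caches
+         model(x)
+     torch.cuda.synchronize()
+     start = time.perf_counter()
+     for _ in range(iters):
+         model(x)
+     torch.cuda.synchronize()
+     return (time.perf_counter() - start) / iters  # seconds per image
+ ```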
265
+ # 4.4. Discussion
266
+
267
+ We have compared RCAN-it with existing SR models and demonstrated the simplicity and surprising effectiveness of the updated training strategies. The oracle study also indicates the large room for improving SR models from aspects other than architecture (Table 4). One current limitation is that we have not tested our improved training protocol with the latest models. We believe that at least recent CNN-based models can enjoy performance gains by adopting our solution, and we leave that for future exploration. We also want to emphasize that the underfitting problem and the empirical improvements are strongly tied to the DF2K [27] dataset used for training the models. Researchers may make different observations using different training data.
268
+
269
+ # 5. Conclusion and Future Work
270
+
271
+ In this work, we revisit the standard RCAN [50] and demonstrate that with a proper training strategy, we can alleviate the underfitting problem and increase the image super-resolution performance by a large margin. This update makes our RCAN-it (RCAN with improved training) better than or comparable with the latest CNN-based approaches, demonstrating the importance of training besides architecture. Recently, the popularity of vision transformers [10] has inspired a wave of transformer-based architectures in both high-level [10, 31] and low-level [26] tasks. However, Dosovitskiy et al. [10] show that the optimal settings for training CNN-based classifiers and vision transformers can be very different. In future research, we will also explore the training of transformers for SR.
274
+
275
+ Acknowledgements. We thank the support from NSF award IIS-2124179 and NIH award 5U54CA225088-03.
276
+
277
+ # References
278
+
279
+ [1] Jordan Ash and Ryan P Adams. On warm-starting neural network training. Advances in Neural Information Processing Systems, 33, 2020. 11
280
+ [2] Irwan Bello, William Fedus, Xianzhi Du, Ekin D Cubuk, Aravind Srinivas, Tsung-Yi Lin, Jonathon Shlens, and Barret Zoph. Revisiting resnets: Improved training and scaling strategies. arXiv preprint arXiv:2103.07579, 2021. 1, 2, 3, 5
281
+ [3] Marco Bevilacqua, Aline Roumy, Christine Guillemot, and Marie Line Alberi-Morel. Low-complexity single-image super-resolution based on nonnegative neighbor embedding. In BMVC, 2012. 4, 5
282
+ [4] Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In CVPRW, 2020. 2
283
+ [5] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In ICCV, 2017. 11
284
+ [6] Tao Dai, Jianrui Cai, Yongbing Zhang, Shu-Tao Xia, and Lei Zhang. Second-order attention network for single image super-resolution. In CVPR, 2019. 1, 2, 7
285
+ [7] Tao Dai, Hua Zha, Yong Jiang, and Shu-Tao Xia. Image super-resolution via residual block attention networks. In CVPRW, 2019. 1, 2
286
+ [8] Chao Dong, Chen Change Loy, Kaiming He, and Xiaoou Tang. Image super-resolution using deep convolutional networks. TPAMI, 2016. 1, 2
287
+ [9] Chao Dong, Chen Change Loy, and Xiaoou Tang. Accelerating the super-resolution convolutional neural network. In ECCV, 2016. 1
288
+ [10] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR, 2021. 9
289
+ [11] Yuchen Fan, Jiahui Yu, Yiqun Mei, Yulun Zhang, Yun Fu, Ding Liu, and Thomas S Huang. Neural sparse representation for image restoration. In NeurIPS, 2020. 7
290
+ [12] William T Freeman, Thouis R Jones, and Egon C Pasztor. Example-based super-resolution. IEEE Computer graphics and Applications, 22(2):56-65, 2002. 2
291
+ [13] Priya Goyal, Piotr Dollár, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch,
292
+
293
+ Yangqing Jia, and Kaiming He. Accurate, large minibatch sgd: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017. 2
294
+ [14] Muhammad Haris, Greg Shakhnarovich, and Norimichi Ukita. Deep back-projection networks for super-resolution. In CVPR, 2018. 7
295
+ [15] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 1, 2, 3
296
+ [16] Tong He, Zhi Zhang, Hang Zhang, Zhongyue Zhang, Junyuan Xie, and Mu Li. Bag of tricks for image classification with convolutional neural networks. In CVPR, 2019. 1, 2, 3, 5
297
+ [17] Xiangyu He, Zitao Mo, Peisong Wang, Yang Liu, Mingyuan Yang, and Jian Cheng. Ode-inspired network design for single image super-resolution. In CVPR, 2019. 7
298
+ [18] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 2, 3, 11
299
+ [19] Jie Hu, Li Shen, and Gang Sun. Squeeze-and-excitation networks. In CVPR, 2018. 2, 3
300
+ [20] Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In ECCV, 2016. 2, 3, 5
301
+ [21] Jia-Bin Huang, Abhishek Singh, and Narendra Ahuja. Single image super-resolution from transformed self-exemplars. In CVPR, 2015. 4, 6
302
+ [22] Jiwon Kim, Jung Kwon Lee, and Kyoung Mu Lee. Accurate image super-resolution using very deep convolutional networks. In CVPR, 2016. 1, 2
303
+ [23] Diederik Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2014. 2, 3, 4
304
+ [24] Christian Ledig, Lucas Theis, Ferenc Huszár, Jose Caballero, Andrew Cunningham, Alejandro Acosta, Andrew Aitken, Alykhan Tejani, Johannes Totz, Zehan Wang, and Wenzhe Shi. Photo-realistic single image super-resolution using a generative adversarial network. In CVPR, 2017. 1
305
+ [25] Zhen Li, Jinglei Yang, Zheng Liu, Xiaomin Yang, Gwanggil Jeon, and Wei Wu. Feedback network for image superresolution. In CVPR, 2019. 6, 7, 13
306
+ [26] Jingyun Liang, Jiezhang Cao, Guolei Sun, Kai Zhang, Luc Van Gool, and Radu Timofte. Swinir: Image restoration using swin transformer. In ICCVW, 2021. 2, 3, 4, 5, 8, 9
307
+ [27] Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Kyoung Mu Lee. Enhanced deep residual networks for single image super-resolution. In CVPRW, 2017. 2, 3, 4, 6, 7, 8, 12, 13
308
+ [28] Zachary C Lipton and Jacob Steinhardt. Troubling trends in machine learning scholarship. arXiv preprint arXiv:1807.03341, 2018. 1
309
+ [29] Ding Liu, Bihan Wen, Yuchen Fan, Chen Change Loy, and Thomas S Huang. Non-local recurrent network for image restoration. In NeurIPS, 2018. 7
310
+ [30] Jie Liu, Wenjie Zhang, Yuting Tang, Jie Tang, and Gangshan Wu. Residual feature aggregation network for image superresolution. In CVPR, 2020. 6, 7, 8, 13
311
+
312
+ [31] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. arXiv preprint arXiv:2103.14030, 2021. 2, 8, 9
313
+ [32] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016. 3
314
+ [33] Salma Abdel Magid, Yulun Zhang, Donglai Wei, Won-Dong Jang, Zudi Lin, Yun Fu, and Hanspeter Pfister. Dynamic high-pass filtering and multi-spectral attention for image super-resolution. In ICCV, 2021. 2, 4, 7, 8
315
+ [34] David Martin, Charless Fowlkes, Doron Tal, and Jitendra Malik. A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In ICCV, 2001. 4
316
+ [35] Yusuke Matsui, Kota Ito, Yuji Aramaki, Azuma Fujimoto, Toru Ogawa, Toshihiko Yamasaki, and Kiyoharu Aizawa. Sketch-based manga retrieval using manga109 dataset. Multimedia Tools and Applications, 2017. 2, 4, 5
317
+ [36] Yiqun Mei, Yuchen Fan, Yuqian Zhou, Lichao Huang, Thomas S Huang, and Humphrey Shi. Image superresolution with cross-scale non-local attention and exhaustive self-exemplars mining. In CVPR, 2020. 6, 7, 8, 13
318
+ [37] Ben Niu, Weilei Wen, Wenqi Ren, Xiangde Zhang, Lianping Yang, Shuzhen Wang, Kaihao Zhang, Xiaochun Cao, and Haifeng Shen. Single image super-resolution via a holistic attention network. In ECCV, 2020. 2, 7
319
+ [38] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. NeurIPS, 2019. 3, 4, 12
320
+ [39] Prajit Ramachandran, Barret Zoph, and Quoc V. Le. Swish: a self-gated activation function. arXiv preprint arXiv:1710.05941, 2017. 2, 3
321
+ [40] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In CVPR, 2016. 4
322
+ [41] Deqing Sun, Daniel Vlasic, Charles Herrmann, Varun Jampani, Michael Krainin, Huiwen Chang, Ramin Zabih, William T Freeman, and Ce Liu. Autoflow: Learning a better training set for optical flow. In CVPR, 2021. 1
323
+ [42] Deqing Sun, Xiaodong Yang, Ming-Yu Liu, and Jan Kautz. Models matter, so does training: An empirical study of cnns for optical flow estimation. TPAMI, 2019. 1
324
+ [43] Radu Timofte, Eirikur Agustsson, Luc Van Gool, Ming-Hsuan Yang, Lei Zhang, Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, Kyoung Mu Lee, et al. Ntire 2017 challenge on single image super-resolution: Methods and results. In CVPRW, 2017. 4
325
+ [44] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017. 2
326
+ [45] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. TIP, 2004. 4
327
+
328
+ [46] Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. arXiv preprint arXiv:1708.03888, 2017. 2
329
+ [47] Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training bert in 76 minutes. arXiv preprint arXiv:1904.00962, 2019. 2, 3, 4
330
+ [48] Roman Zeyde, Michael Elad, and Matan Protter. On single image scale-up using sparse-representations. In Proc. 7th Int. Conf. Curves Surf., 2010. 4
331
+ [49] Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. arXiv preprint arXiv:1710.09412, 2017. 2, 3
332
+ [50] Yulun Zhang, Kunpeng Li, Kai Li, Lichen Wang, Bineng Zhong, and Yun Fu. Image super-resolution using very deep residual channel attention networks. In ECCV, 2018. 1, 2, 3, 4, 6, 7, 8, 11, 13
333
+ [51] Yulun Zhang, Kunpeng Li, Kai Li, Bineng Zhong, and Yun Fu. Residual non-local attention networks for image restoration. In ICLR, 2019. 7
334
+ [52] Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu. Residual dense network for image super-resolution. In CVPR, 2018. 1, 2, 6, 7, 13
335
+ [53] Yulun Zhang, Donglai Wei, Can Qin, Huan Wang, Hanspeter Pfister, and Yun Fu. Context reasoning attention network for image super-resolution. In ICCV, 2021. 2, 4, 7
336
+ [54] Shangchen Zhou, Jiawei Zhang, Wangmeng Zuo, and Chen Change Loy. Cross-scale internal graph neural network for image super-resolution. In NeurIPS, 2020. 6, 7, 8, 13
337
+
338
+ Table 8. Quantitative comparison between updated RCAN baseline, RCAN-it, and RCAN-it $\star$ . Results are evaluated by the PSNR (dB) metric for $\times 2$ SR with self-ensemble. RCAN-it $\star$ improves RCAN-it with even longer large-patch finetuning.
339
+
340
+ <table><tr><td>Method</td><td>Set5</td><td>Set14</td><td>B100</td><td>Urban100</td><td>Manga109</td></tr><tr><td>RCAN</td><td>38.35</td><td>34.42</td><td>32.46</td><td>33.61</td><td>39.87</td></tr><tr><td>RCAN-it</td><td>38.41</td><td>34.55</td><td>32.51</td><td>33.79</td><td>40.04</td></tr><tr><td>RCAN-it*</td><td>38.42</td><td>34.57</td><td>32.53</td><td>33.87</td><td>40.10</td></tr></table>
341
+
342
+ # A. Additional Experiments
343
+
344
+ In Table 3 we have demonstrated the (positive and negative) effects of different techniques when working with the RCAN [50] model. This section shows an even longer training schedule, a comparison between training from scratch and warm-start, and two additional techniques we tested, which can be helpful data points for future work.
345
+
346
+ Even Longer Training. In Sec. 4 we showed that the updated RCAN baseline is optimized for $80\mathrm{K}$ iterations, and our RCAN-it is optimized for $200\mathrm{K}$ iterations (160K training + 40K large-patch finetuning). RCAN-it is trained on 4 Nvidia V100 GPUs for 4 days and outperforms nearly all CNN-based approaches published in the three years since the release of RCAN [50]. Here we show another version, called RCAN-it $\star$, which is finetuned for an additional $160\mathrm{K}$ iterations using the large-patch scheme (i.e., $64 \times 64$), for a total of $360\mathrm{K}$ iterations. The total training time is similar to the original protocol (about 7 days). We show that this finetuning further increases the performance on all benchmark sets (Table 8), again supporting our claim that the model performance is restricted by underfitting. Specifically, RCAN-it $\star$ achieves a state-of-the-art PSNR of 40.10 dB for $\times 2$ SR on the Manga109 dataset.
347
+
348
+ Our results illustrate that the large-patch scheme can effectively improve the performance of RCAN by a large margin within the same time budget. In follow-up research, we will investigate how to improve training efficiency while maintaining or improving on the current best results.
349
+
350
+ From scratch vs. warm start. In Sec. 3 of the main text, we describe how we use warm start, which initializes the $\times 3$ and $\times 4$ models with pretrained $\times 2$ SR model weights to save training time. However, we are also aware of works in image classification showing that warm-starting can yield worse generalization performance than training from scratch [1]. Therefore we also investigate whether warm-starting influences the performance of RCAN in SR.
351
+
352
+ Table 9 shows that for the $\times 4$ SR task, warm start (80K iterations) achieves results better than or comparable to training from scratch (160K iterations) while saving $50\%$ of the training time. This observation demonstrates that warm start is an economical choice for training deep SR architectures and suggests differences between image recognition and
353
+
354
+ Table 9. Quantitative comparison between training from scratch and warm start. Results are evaluated by the PSNR (dB) metric for $\times 4$ SR with self-ensemble. Warm start saves $50\%$ of the training time compared with training from scratch.
355
+
356
+ <table><tr><td>Method</td><td>Set5</td><td>Set14</td><td>B100</td><td>Urban100</td><td>Manga109</td></tr><tr><td>Scratch</td><td>32.81</td><td>29.08</td><td>27.91</td><td>27.32</td><td>31.98</td></tr><tr><td>Warm-start</td><td>32.81</td><td>29.08</td><td>27.91</td><td>27.33</td><td>32.01</td></tr></table>
357
+
358
+ super-resolution models. The good results of warm-start may also be because the training data for different scales are shared (with different down-sampling ratios), and we will explore that in future work.
359
+
360
+ Rejection Sampling. Existing SR frameworks generate training data by sampling patches uniformly at random from all positions. However, image regions with limited variance in shape and texture (e.g., sky) may occupy a large portion of the image and thus get sampled with high probability, while contributing little to the training. Therefore we test a rejection sampling scheme to let the model focus on relatively challenging regions. Specifically, given a sampled pair of low-resolution (LR) and high-resolution (HR) patches, we upsample the LR patch with bicubic interpolation and calculate the PSNR between the upsampled patch and the HR patch (please note that the LR images are also downsampled with the bicubic kernel). A higher PSNR means the patch is less challenging, as bicubic upsampling already achieves a satisfactory score; we then reject this pair of training patches with a probability $p$ and sample another pair instead. We calculate the statistics from 5,000 input patches (of size $48 \times 48$) for the $\times 2$ SR task and observe that the average PSNR is around 24 dB. Therefore we set the threshold to 24 and $p = 0.8$, so that a sampled patch with $\mathrm{PSNR} \geq 24.0$ is rejected with a probability of 0.8.
361
+
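+ Below is a minimal sketch of this rejection sampling scheme, assuming a helper `sample_fn` that returns a random (LR, HR) patch pair with values in [0, 1]; the 24 dB threshold and the rejection probability of 0.8 follow the description above.
+
+ ```python
+ import random
+ import torch
+ import torch.nn.functional as F
+
+ def psnr(a, b, max_val=1.0):
+     """PSNR between two images with values in [0, max_val]."""
+     mse = F.mse_loss(a, b)
+     return 10.0 * torch.log10(max_val ** 2 / mse)
+
+ def sample_with_rejection(sample_fn, scale=2, thresh=24.0, p=0.8):
+     """Reject "easy" patch pairs that bicubic upsampling already
+     reconstructs well (PSNR >= thresh) with probability p."""
+     while True:
+         lr, hr = sample_fn()  # assumed helper: random (C,H,W) patch pair
+         up = F.interpolate(lr.unsqueeze(0), scale_factor=scale,
+                            mode="bicubic", align_corners=False).squeeze(0)
+         if psnr(up.clamp(0, 1), hr) >= thresh and random.random() < p:
+             continue  # too easy: resample another pair
+         return lr, hr
+ ```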
362
+ We show the results in Table 10. Contrary to our expectations, the results are worse with the rejection sampling technique. We argue that since the problem restricting model performance is underfitting, as demonstrated in Figure 2 and Table 3, changing the input distribution can have a negative influence: some relatively easy regions are still not well fitted by the RCAN model.
363
+
364
+ Deformable Convolution. Besides the SiLU activation [18], we also tested another architecture modification, deformable convolution [5], in the RCAN model. The idea of deformable convolution is to relax the rigid grid structure of the convolution operation and thus increase the field of view (FoV). To test whether this design can help RCAN in the SR tasks, we replace the final convolution layer in each residual group (RG) with a deformable convolution layer. The results in Table 10 show that deformable convolution decreases the baseline performance. We argue that learning the offsets in the deformable convolution layers increases the difficulty in
365
+
366
+ Table 10. Additional ablation studies of model and training options. The impact of architecture modifications and training strategies is shown upon the updated RCAN baseline (highlighted in Table 2) for $\times 2$ SR. Results are produced with self-ensemble.
367
+
368
+ <table><tr><td rowspan="2">Configuration</td><td colspan="2">Set5</td><td colspan="2">Set14</td><td colspan="2">B100</td><td colspan="2">Urban100</td><td colspan="2">Manga109</td><td colspan="2">Average</td></tr><tr><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td><td>PSNR</td><td>Δ</td></tr><tr><td>Baseline</td><td>38.350</td><td>-</td><td>34.415</td><td>-</td><td>32.463</td><td>-</td><td>33.610</td><td>-</td><td>39.866</td><td>-</td><td>35.741</td><td>-</td></tr><tr><td>w/ Deformable Conv.</td><td>38.153</td><td>-0.197</td><td>33.829</td><td>-0.586</td><td>32.288</td><td>-0.175</td><td>32.632</td><td>-0.977</td><td>39.294</td><td>-0.572</td><td>35.239</td><td>-0.501</td></tr><tr><td>w/ Rejection Sampling</td><td>38.331</td><td>-0.019</td><td>34.371</td><td>-0.044</td><td>32.451</td><td>-0.012</td><td>33.573</td><td>-0.037</td><td>39.763</td><td>-0.103</td><td>35.698</td><td>-0.043</td></tr></table>
369
+
370
+ optimizing the deep SR architecture.
371
+
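+ A sketch of one possible substitution is shown below, using torchvision's `DeformConv2d` with a small convolution that predicts the sampling offsets. The exact wiring of the offset branch in our experiments, as well as the module path in the usage comment, should be treated as assumptions.
+
+ ```python
+ import torch.nn as nn
+ from torchvision.ops import DeformConv2d
+
+ class DeformConvBlock(nn.Module):
+     """Drop-in replacement for a 3x3 conv: a plain conv predicts the
+     2*k*k per-position sampling offsets consumed by DeformConv2d."""
+     def __init__(self, channels, k=3):
+         super().__init__()
+         self.offset = nn.Conv2d(channels, 2 * k * k, k, padding=k // 2)
+         nn.init.zeros_(self.offset.weight)  # zero offsets: start as a
+         nn.init.zeros_(self.offset.bias)    # regular (grid) convolution
+         self.deform = DeformConv2d(channels, channels, k, padding=k // 2)
+
+     def forward(self, x):
+         return self.deform(x, self.offset(x))
+
+ # Hypothetical usage: replace the final conv of a residual group, e.g.,
+ # residual_group.body[-1] = DeformConvBlock(64)
+ ```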
372
+ # B. Visual Comparison
373
+
374
+ In Figure 3, we show the visual comparisons with existing CNN-based approaches on the $\times 4$ SR task. Here, we show more visual comparisons with previous SR models on the Urban100 dataset in Figure 4. Our RCAN-it (i.e., RCAN with improved training) can better restore the high-frequency details that are smoothed in LR images.
375
+
376
+ # C. Code
377
+
378
+ We release the code (under the MIT License) to reproduce all the experiments in this paper. Our implementation is mainly based on PyTorch [38], together with public code from (1) the official EDSR implementation [27] (MIT License), (2) Detectron2 (Apache-2.0 License), and (3) torch-optimizer (Apache-2.0 License). We thank the contributors of those libraries.
379
+
380
+ ![](images/d182c3bdf9a0378aafc1393e283f6c179976860b24cf50b59ed6db76997fab52.jpg)
381
+ Urban100: img_062 $(\times 4)$
382
+
383
+ ![](images/dea0eb781eb02efde9810eee7928eb27aebd22369d8f430f2be1abbda5001bbd.jpg)
384
+ HQ
385
+
386
+ ![](images/62d9d088f4a80794c5730a474c1071f7990228600a85b69eff996a10183c991d.jpg)
387
+ Bicubic
388
+
389
+ ![](images/b868691513e7d269ea8a98b76a78b487a4ae4286ece4f06c47e4c29361b965f9.jpg)
390
+ EDSR [27]
391
+
392
+ ![](images/998f94a7cf520bf3e848e2a49d82e19c2781e95c7d4f690dd7ae47980864509e.jpg)
393
+ RDN [52]
394
+
395
+ ![](images/124dfea66c202fb8e7b3b26f634bfb82394a63527315cc4e0fb30622263850b9.jpg)
396
+ RCAN [50]
397
+
398
+ ![](images/a9e62aa7152c53bf036b3735f9ef64bf5d0f60cc3ded7f56511d07fc62fcf00e.jpg)
399
+ Urban100: img_069 $(\times 4)$
400
+
401
+ ![](images/a6c1d2e1af24cace271d48ed50ad594a5e2c98d1de40dab106c6e373a9b6383d.jpg)
402
+ HQ
403
+
404
+ ![](images/0108109f8f2c054e90062b5f547894e1596b2af05a5a691867edc3d9e940efbc.jpg)
405
+ Bicubic
406
+
407
+ ![](images/600e1b9326721ff73404508b2469b8c13a04512093c2cc968ae3f05eeef92502.jpg)
408
+ EDSR [27]
409
+
410
+ ![](images/1751051036623d8a6ee2000b17c4c2818218fcbb387b90a7e65edad643adb80e.jpg)
411
+ RDN [52]
412
+
413
+ ![](images/9477fe9a0973447211cc7878b8b41993eb99005334dd396328b97c1bfe1ad8be.jpg)
414
+ RCAN [50]
415
+
416
+ ![](images/162195fc969cf453dec213806bd1e35d3cda42410f48e63b6b88023685b65e1f.jpg)
417
+ Urban100: img_076 $(\times 4)$
418
+
419
+ ![](images/7e66955e8b37b0d769525817c8c86b2f9af261cb2dd2eaa9c810b17530a97cee.jpg)
420
+ HQ
421
+
422
+ ![](images/78b759184f6cf49178cb45a576ef1e22f7dac2fe864aa20fcc888253d33e9202.jpg)
423
+ Bicubic
424
+
425
+ ![](images/03b4da2092fee724e409cce360962abb6c689757e91830d0660153447c377ca0.jpg)
426
+ EDSR [27]
427
+
428
+ ![](images/b923e2b1679a4837acb0d663aae7a71cfb11657e56b0906911bfb2fbb228394b.jpg)
429
+ RDN [52]
430
+
431
+ ![](images/a65f49f8f1e656e1fdfffb838da672515333be67d474d3a2ee80c8e8e8a7060d.jpg)
432
+ RCAN [50]
433
+
434
+ ![](images/a08618225857a68cab4ec08cf491712873834c434087acfc7009accae4599f52.jpg)
435
+ Urban100: img_083 $(\times 4)$
436
+
437
+ ![](images/5c9e58302509c6ff44c4a14fa636aba10fb67cb2c273af4ec3673e8df5614769.jpg)
438
+ HQ
439
+
440
+ ![](images/9fa97dda14dc83cd4909520630a1d12d8926d743f0bb576ed1e5766c6f3d463f.jpg)
441
+ Bicubic
442
+
443
+ ![](images/ef5a0e4d3dc4a0da1872941a0a31edfb3a3d1974721e231df139eeb2960f355a.jpg)
444
+ EDSR [27]
445
+
446
+ ![](images/f50a1821b617b634560cbf6c6ccf24fd89d5b1604cc2e92cc6cdf55d03f0aecf.jpg)
447
+ RDN [52]
448
+
449
+ ![](images/dd9d26db044a3ead048ed719e05dbfe16884941b27e4ecc2967389bad0488b85.jpg)
450
+ RCAN [50]
451
+
452
+ ![](images/4a27348e6479e665594e8f113504a5629212fdd44019a3f1dec2e69e819321fb.jpg)
453
+ Urban100: img_097 $(\times 4)$
454
+
455
+ ![](images/7d2eb29abdc9c0333d000ff10c0c2a9911ee7965007fa4a6d4d02e7b4306d92c.jpg)
456
+
457
+ ![](images/a69b2dd33f56284288111239e45a15053f5beeda9d69d1a73e83b7c9999d1e22.jpg)
458
+
459
+ ![](images/420306b30d1d18982431140142a42e2be0f286da9453ac8c630f5708f5626e0b.jpg)
460
+
461
+ ![](images/f845d4876acb3d2563f71348676c99e35ef7f8829fb69b4fdf27ae3012bc84a2.jpg)
462
+
463
+ ![](images/50556a6c92b211bb807287459b5714db0e1a0cbaccab21bb63255784f7287b3b.jpg)
464
+
465
+ ![](images/26f325ce6ba06ac040d522ba63db8564072fa27a90bb7e41fa47f8a7572ea62b.jpg)
466
+ Urban100: img_099 $(\times 4)$
467
+ Figure 4. Additional visual comparisons for the $\times 4$ SR task on the Urban100 dataset. Our RCAN-it (RCAN with improved training) can better reconstruct high-frequency details compared with previous approaches.
468
+
469
+ ![](images/031236bf8533d0db1256c06e08b18b5928ab32f4c01cca7fd8067775c6730dde.jpg)
470
+ HQ
471
+ SRFBN [25]
472
+
473
+ ![](images/843188f0ce1ee17139a7daa5dbddec9c5b4af092af470ad23e4d6dd9c9e2632b.jpg)
474
+ Bicubic
475
+ IGNN [54]
476
+
477
+ ![](images/2e7039fc7926b1908f1b8ed56e96a5ccd17065ac8b5e884c2ba66dcc02c06924.jpg)
478
+ EDSR [27]
479
+ CSNLN [36]
480
+
481
+ ![](images/905044ac3a1326c04c77b601be27359d3eeec311bd9a0179eb0377896c2ee041.jpg)
482
+ RDN [52]
483
+ RFANet [30]
484
+
485
+ ![](images/e6cb745dea035908f453033349d812bfb5c29e71437c8a0542a358a6759b85ed.jpg)
486
+ RCAN [50]
487
+ RCAN-it (ours)
2201.11xxx/2201.11279/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f3ea24502b9c03bf19a6f8bac7e4ab4980695f2d6c1d3565cbea7d2903edbf8
3
+ size 1344234
2201.11xxx/2201.11279/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11295/8a760769-4fd1-4824-9420-1bc0f6b26c9e_content_list.json ADDED
@@ -0,0 +1,873 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Network Slicing with MEC and Deep",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 192,
8
+ 70,
9
+ 803,
10
+ 104
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Reinforcement Learning for the Internet of Vehicles",
17
+ "text_level": 1,
18
+ "bbox": [
19
+ 81,
20
+ 107,
21
+ 916,
22
+ 138
23
+ ],
24
+ "page_idx": 0
25
+ },
26
+ {
27
+ "type": "text",
28
+ "text": "Zoubeir Mlika, Member, IEEE, and Soumaya Cherkaoui, Senior Member, IEEE",
29
+ "bbox": [
30
+ 200,
31
+ 147,
32
+ 794,
33
+ 164
34
+ ],
35
+ "page_idx": 0
36
+ },
37
+ {
38
+ "type": "text",
39
+ "text": "Abstract—The interconnection of vehicles in the future fifth generation (5G) wireless ecosystem forms the so-called Internet of vehicles (IoV). IoV offers new kinds of applications requiring delay-sensitive, compute-intensive and bandwidth-hungry services. Mobile edge computing (MEC) and network slicing (NS) are two of the key enabler technologies in 5G networks that can be used to optimize the allocation of the network resources and guarantee the diverse requirements of IoV applications.",
40
+ "bbox": [
41
+ 73,
42
+ 220,
43
+ 491,
44
+ 321
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "As traditional model-based optimization techniques generally end up with NP-hard and strongly non-convex and non-linear mathematical programming formulations, in this paper, we introduce a model-free approach based on deep reinforcement learning (DRL) to solve the resource allocation problem in a MEC-enabled IoV network based on network slicing. Furthermore, the solution uses non-orthogonal multiple access (NOMA) to enable better exploitation of the scarce channel resources. The considered problem jointly addresses the channel and power allocation, the slice selection and the vehicle selection (vehicle grouping). We model the problem as a single-agent Markov decision process. Then, we solve it using DRL with the well-known DQL algorithm. We show that our approach is robust and effective under different network conditions compared to benchmark solutions.",
51
+ "bbox": [
52
+ 73,
53
+ 321,
54
+ 491,
55
+ 511
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "I. INTRODUCTION",
62
+ "text_level": 1,
63
+ "bbox": [
64
+ 222,
65
+ 536,
66
+ 343,
67
+ 549
68
+ ],
69
+ "page_idx": 0
70
+ },
71
+ {
72
+ "type": "text",
73
+ "text": "The Internet of vehicles (IoV) is an emerging concept that enhances the existing capabilities of vehicular communication by integrating with the Internet of things (IoT). IoV is a key use-case in the upcoming beyond fifth generation (5G) wireless networks [1, 2]. IoV creates diverse new applications with extremely diverse service requirements, including ultra-reliable and delay-sensitive, bandwidth-hungry, as well as compute-intensive applications [3]. For example, accident reports require ultra-reliable and extremely low-latency communication, whereas high definition map sharing requires high bandwidth. An important open question in today's IoV networks is \"how to support, using a unified air interface, future IoV services while guaranteeing their extremely diverse performance requirements?\" Network slicing (NS) is a potential solution to respond to this question [4-6]. NS is a tool that enables network operators to support virtualized end-to-end networks; it belongs to the principles of software-defined networking [7]. It mainly allows creating different logical networks on top of a common and programmable physical infrastructure. Another technology, namely mobile edge computing, or better known as multi-access edge computing (MEC), is considered as an",
74
+ "bbox": [
75
+ 73,
76
+ 554,
77
+ 491,
78
+ 875
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "Zoubeir Mlika and Soumaya Cherkaoui are with the research laboratory on intelligent engineering for communications and networking (INTERLAB), Faculty of Engineering, Department of Electrical and Computer Science Engineering, University of Sherbrooke, Sherbrooke J1K 2R1, Quebec, Canada, (e-mail: zoubeir.mlika@usherbrooke.ca, soumaya.cherkaoui@usherbrooke.ca).",
85
+ "bbox": [
86
+ 73,
87
+ 882,
88
+ 491,
89
+ 941
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "important building block in the future IoV ecosystem. The joint implementation of NS and MEC is a key enabler for IoV networks. These two technologies can be used not only to guarantee the diverse requirements of IoV applications but also to deploy the diverse vehicular services at the appropriate locations [3].",
96
+ "bbox": [
97
+ 501,
98
+ 219,
99
+ 921,
100
+ 309
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "Optimal resource allocation in IoV would go through traditional model-based optimization techniques. Due to the complex and highly dynamic nature of IoV, such a model-based approach is not very appealing. In fact, such an approach ends up with strongly non-convex optimization problems that are generally NP-hard [8]. Thus, a model-free machine learning approach is crucial.",
107
+ "bbox": [
108
+ 501,
109
+ 310,
110
+ 921,
111
+ 415
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "Reinforcement learning (RL) is a useful technique in solving NP-hard optimization problems. It has been applied successfully to solve very hard problems in different research areas including wireless networks [9]. It is based on Markov decision process (MDP) modeling where agents learn to select the best actions through repeated interactions with an unknown environment by receiving numerical reward signals [8]. Deep RL (DRL) uses the strong ability of neural networks to generalize across enormous state spaces and reduce the complexity of a solution, thus improving the learning process.",
118
+ "bbox": [
119
+ 501,
120
+ 416,
121
+ 921,
122
+ 566
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "In this paper, using DRL, we propose a new solution framework to solve the challenging problem of resource allocation in a MEC-enabled IoV network. More specifically, we focus on the in-coverage scenario of 5G-new radio (5G-NR) in which vehicles communicate with each other through a base station, e.g., NodeB (gNB), that performs MEC-based tasks [10]. We focus on the broadcast communication technique. Due to the scarce spectrum resources, non-orthogonal multiple access (NOMA) is also used in our proposed framework. NOMA is a promising technique to increase the spectral efficiency in vehicular networks [11].",
129
+ "bbox": [
130
+ 501,
131
+ 566,
132
+ 921,
133
+ 733
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "text",
139
+ "text": "In more detail, the considered resource allocation problem, called IoV resource allocation (IoVRA), involves the allocation of four resources: the slice (deciding which packet to send), the coverage of the broadcast (deciding the range of the broadcast), the resource blocks (RBs), and the power. By carefully allocating these four resources, and by applying the successive interference cancellation (SIC) at the corresponding destination vehicles, NOMA can help in boosting the capacity of the IoV network. The use of NOMA in broadcast communications is different from the usual uplink and downlink NOMA techniques, which is due from the broadcast nature in IoV networks, i.e., two source vehicles broadcast with two distinct transmission powers to the same group of destination vehicles.",
140
+ "bbox": [
141
+ 501,
142
+ 733,
143
+ 921,
144
+ 944
145
+ ],
146
+ "page_idx": 0
147
+ },
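As a rough illustration of the SIC decoding that the paragraph above relies on, the toy sketch below decodes two superimposed broadcasts at one destination vehicle in decreasing received-power order; the received powers and the noise level are made-up numbers, not values from the paper.

```python
# Toy SIC at one destination vehicle: two sources broadcast on the same RB;
# decode the strongest signal first, subtract it, then decode the next.
# Received powers (W) and noise are hypothetical numbers.
noise = 1e-9
received = {"src_A": 2e-6, "src_B": 5e-7}

remaining = sum(received.values())
for src, p in sorted(received.items(), key=lambda kv: -kv[1]):
    sinr = p / (remaining - p + noise)  # interference: not-yet-decoded signals
    print(f"{src}: SINR = {sinr:.1f}")
    remaining -= p                      # perfect cancellation assumed
```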
148
+ {
149
+ "type": "page_number",
150
+ "text": "1",
151
+ "bbox": [
152
+ 911,
153
+ 30,
154
+ 919,
155
+ 39
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "aside_text",
161
+ "text": "arXiv:2201.11295v1 [cs.NI] 27 Jan 2022",
162
+ "bbox": [
163
+ 22,
164
+ 276,
165
+ 57,
166
+ 707
167
+ ],
168
+ "page_idx": 0
169
+ },
170
+ {
171
+ "type": "footer",
172
+ "text": "Copyright (c) 2020 IEEE. Personal use of this material is permitted. However, permission to use this material for any other purposes must be obtained from the IEEE by sending a request to pubs-permissions@ieee.org.",
173
+ "bbox": [
174
+ 76,
175
+ 957,
176
+ 924,
177
+ 982
178
+ ],
179
+ "page_idx": 0
180
+ },
181
+ {
182
+ "type": "image",
183
+ "img_path": "images/5c5e51abb5c4aecc5121cfa7f84630892678529f42bc73b026ac25cf2e54e9e6.jpg",
184
+ "image_caption": [
185
+ "Fig. 1: Two network slices in an IoV-based MEC network."
186
+ ],
187
+ "image_footnote": [],
188
+ "bbox": [
189
+ 91,
190
+ 66,
191
+ 923,
192
+ 534
193
+ ],
194
+ "page_idx": 1
195
+ },
196
+ {
197
+ "type": "text",
198
+ "text": "Even though we propose a MEC-based IoV solution for the case of vehicle-to-vehicle (V2V) communications, our proposed system model is valid for vehicle-to-infrastructure (V2I) communications as well. Indeed, in V2I communications, a vehicle communicates with a gNB-type road side unit (RSU) or a user-type RSU through the cellular Uu or the sidelink (SL) connectivity [12]. For the case of user-type RSU communications, the coverage range selection decision will simply include the RSU. For the case of gNB-type RSU communications, the broadcast coverage range selection could be ignored and replaced by RSU association. Thus, our proposed solution framework is still valid for both V2V and V2I communications.",
199
+ "bbox": [
200
+ 73,
201
+ 582,
202
+ 491,
203
+ 777
204
+ ],
205
+ "page_idx": 1
206
+ },
207
+ {
208
+ "type": "text",
209
+ "text": "To the best of our knowledge, this is the first work that proposes a model-free DRL framework to solve IoVRA in MEC-enabled IoV networks based on broadcast, NS and NOMA. The contributions of our work are the following. We model IoVRA as a single agent MDP. Next, we propose a deep-Q-learning (DQL) algorithm to solve it. Finally, we show that our proposed DQL algorithm outperforms benchmark algorithms.",
210
+ "bbox": [
211
+ 73,
212
+ 777,
213
+ 491,
214
+ 898
215
+ ],
216
+ "page_idx": 1
217
+ },
218
+ {
219
+ "type": "text",
220
+ "text": "A. Organization",
221
+ "text_level": 1,
222
+ "bbox": [
223
+ 504,
224
+ 582,
225
+ 619,
226
+ 597
227
+ ],
228
+ "page_idx": 1
229
+ },
230
+ {
231
+ "type": "text",
232
+ "text": "The article is organized as follows. Section II presents the system model, the single agent MDP, and describes the proposed DQL algorithm. Section III presents benchmark algorithmic solutions and gives the simulation results. Finally, section IV draws some conclusions and discusses interesting open research questions.",
233
+ "bbox": [
234
+ 503,
235
+ 601,
236
+ 921,
237
+ 691
238
+ ],
239
+ "page_idx": 1
240
+ },
241
+ {
242
+ "type": "text",
243
+ "text": "II. PROPOSED DRL FOR INTERNET OF VEHICLES",
244
+ "text_level": 1,
245
+ "bbox": [
246
+ 555,
247
+ 709,
248
+ 870,
249
+ 723
250
+ ],
251
+ "page_idx": 1
252
+ },
253
+ {
254
+ "type": "text",
255
+ "text": "A. Internet of Vehicles Model",
256
+ "text_level": 1,
257
+ "bbox": [
258
+ 503,
259
+ 728,
260
+ 710,
261
+ 742
262
+ ],
263
+ "page_idx": 1
264
+ },
265
+ {
266
+ "type": "text",
267
+ "text": "We consider an IoV network composed of a set of source vehicles that generate packets, and a set of destination vehicles that receive packets. All vehicles operate in the in-coverage scenario of 5G-NR [10] and thus they are covered by some gNB that performs edge computing. A source vehicle uses broadcast communications to transmit to a subset of the destination vehicles. The time is slotted into a set of slots. The total bandwidth is divided into a set of frequency slots. A resource block (RB) is given by the pair (frequency, slot).",
268
+ "bbox": [
269
+ 501,
270
+ 747,
271
+ 921,
272
+ 882
273
+ ],
274
+ "page_idx": 1
275
+ },
276
+ {
277
+ "type": "text",
278
+ "text": "The proposed system model supports several use cases, including advanced driving with trajectory sharing, extended sensors [13] and is valid for both V2V and V2I communications. To provide guaranteed quality of service requirements",
279
+ "bbox": [
280
+ 503,
281
+ 883,
282
+ 921,
283
+ 944
284
+ ],
285
+ "page_idx": 1
286
+ },
287
+ {
288
+ "type": "page_number",
289
+ "text": "2",
290
+ "bbox": [
291
+ 911,
292
+ 30,
293
+ 919,
294
+ 40
295
+ ],
296
+ "page_idx": 1
297
+ },
298
+ {
299
+ "type": "text",
300
+ "text": "to the different use cases, NS is used, which is an efficient solution in IoV networks [6]. It mainly creates logical networks on the top of a common and programmable MEC-enabled IoV infrastructure. We create two network slices. The first slice (slice 1) is designed for non-safety applications such as video streaming. The second slice (slice 2) is designed for safety applications such as emergency warnings. An example of the MEC-enabled NS system model is given in Fig. 1, where vehicles communicate with gNBs that are connected to MEC servers. On top of this network infrastructure, two network slices are created to support IoV applications. Slice 1 is designated for high throughput or enhanced mobile broadband communication (eMBB) and slice 2 is designated for ultra-reliable and low latency communication (uRLLC).",
301
+ "bbox": [
302
+ 73,
303
+ 69,
304
+ 491,
305
+ 280
306
+ ],
307
+ "page_idx": 2
308
+ },
309
+ {
310
+ "type": "text",
311
+ "text": "Each source vehicle has two different packets for each slice, where slice 1's packet $(\\mathsf{pkt}^n)$ requires high throughput whereas slice 2's packet $(\\mathsf{pkt}^s)$ has stringent latency requirements. For any packet to be delivered successfully, the corresponding source vehicle requires a set of RBs such that the achievable data rates are above the minimum requirements. Packet $\\mathsf{pkt}^n$ can be transmitted using any RBs from the frequency-slot resource pool with a carefully chosen transmission power per each RB. However, $\\mathsf{pkt}^s$ , having an arrival time and a deadline, can be transmitted using any frequency slot but only using slots between its arrival time and deadline with a carefully chosen transmission power per each RB. The wireless channel gain between two vehicles includes fast and slow fading.",
312
+ "bbox": [
313
+ 73,
314
+ 280,
315
+ 491,
316
+ 474
317
+ ],
318
+ "page_idx": 2
319
+ },
320
+ {
321
+ "type": "text",
322
+ "text": "A source vehicle has to decide which packet to send, at what range to broadcast, what RBs to use, and what transmission powers to allocate. The range broadcasting optimization is similar to the classical vehicle clustering [14-17]. To improve the spectral efficiency of the IoV network, we use NOMA to superimpose the transmissions of the source vehicles transmitting to some destination vehicle, which uses SIC to decode the superimposed transmissions.",
323
+ "bbox": [
324
+ 73,
325
+ 477,
326
+ 491,
327
+ 597
328
+ ],
329
+ "page_idx": 2
330
+ },
331
+ {
332
+ "type": "text",
333
+ "text": "B. Proposed Deep-Q-Learning Algorithm",
334
+ "text_level": 1,
335
+ "bbox": [
336
+ 75,
337
+ 617,
338
+ 359,
339
+ 633
340
+ ],
341
+ "page_idx": 2
342
+ },
343
+ {
344
+ "type": "text",
345
+ "text": "Vehicles operate in the coverage of gNB with MEC, that collects information about vehicles and performs pilot estimation to obtain the channel statistics. Based on the obtained feedback information, gNB observes the IoV environment and makes decisions. It plays the role of an intelligent entity in a single agent MDP. With the help of DRL, gNB learns to solve efficiently the complicated IoVRA problem. Specifically, gNB implements the well-known DQL approach [18]. DQL has mainly two parts: training and inference. In training, gNB trains a deep-Q-network (DQN), whereas in inference, it takes actions according to its trained DQN. DQL is an improvement of the so-called QL algorithm that is based on a tabular method which creates a table of state-action pairs. QL explores the action space using an exploration policy, e.g., $\\epsilon$ -greedy. Despite the proven effectiveness of QL, it generally fails when the state and action spaces become large as in IoVRA.",
346
+ "bbox": [
347
+ 73,
348
+ 636,
349
+ 491,
350
+ 877
351
+ ],
352
+ "page_idx": 2
353
+ },
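For readers unfamiliar with the tabular QL baseline that DQL improves on, here is a minimal, self-contained sketch of Q-learning with epsilon-greedy exploration; the toy chain environment, learning rate, and state space are hypothetical and only illustrate why the table becomes unmanageable for spaces as large as IoVRA's.

```python
import random
from collections import defaultdict

# Tabular Q-learning with eps-greedy exploration on a toy 5-state chain.
# alpha, gamma and the environment are hypothetical illustrations.
Q = defaultdict(float)                  # the "table of state-action pairs"
alpha, gamma, eps, actions = 0.1, 0.99, 0.1, [0, 1, 2]

def step(state, action):                # stand-in for the unknown environment
    return random.random(), (state + action) % 5   # (reward, next state)

state = 0
for _ in range(1000):
    if random.random() < eps:           # explore
        action = random.choice(actions)
    else:                               # exploit
        action = max(actions, key=lambda a: Q[(state, a)])
    reward, nxt = step(state, action)
    target = reward + gamma * max(Q[(nxt, a)] for a in actions)
    Q[(state, action)] += alpha * (target - Q[(state, action)])
    state = nxt
```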
354
+ {
355
+ "type": "text",
356
+ "text": "DQL is a promising technique that is proposed to solve the curse of dimensionality in RL by approximating the Q action-value function using deep learning. One way to solve IoVRA is through multi-agent DRL by combining independent QL for",
357
+ "bbox": [
358
+ 73,
359
+ 877,
360
+ 491,
361
+ 939
362
+ ],
363
+ "page_idx": 2
364
+ },
365
+ {
366
+ "type": "text",
367
+ "text": "each agent. That is, each agent tries to learn its own policy based on its own observations and actions while treating all other agents as part of the environment. This badly influences the result of the training as it creates a non-stationary environment that changes as other agents take decisions. For this reason, a MEC-enabled IoV network facilitates the training in such situation by modeling IoVRA as a single agent who performs the training at the edge of the IoV network. The system architecture of the proposed DQN approach is given in Fig. 2, in which gNB and MEC server interact with the IoV environment and take decisions accordingly.",
368
+ "bbox": [
369
+ 501,
370
+ 69,
371
+ 921,
372
+ 234
373
+ ],
374
+ "page_idx": 2
375
+ },
376
+ {
377
+ "type": "text",
378
+ "text": "Before describing in detail DQL, first, IoVRA is modeled as a single agent MDP given by the quadruple: state space, action space, reward function and transition probability. The agent in this MDP is the gNB, which takes an action, receives a reward and moves to the next state based on its interaction with the unknown IoV environment. This interaction helps gNB gain more experiences and improves its accumulated reward.",
379
+ "bbox": [
380
+ 503,
381
+ 234,
382
+ 921,
383
+ 340
384
+ ],
385
+ "page_idx": 2
386
+ },
387
+ {
388
+ "type": "list",
389
+ "sub_type": "text",
390
+ "list_items": [
391
+ "1) The State Space: At any slot, any state of the IoV environment is unknown directly to gNB. Instead, gNB receives an observation from the IoV environment. In our model, an observation includes local channel state information (CSI) and the transmission behavior of the source vehicles. More precisely, an observation includes the large and small-scale fading values between vehicles. These values can be accurately estimated by the destination vehicles and fed back to gNB without significant delay [19]. The observation also includes a decision variable that indicates whether the source vehicles transmitted in previous slots and if so which packet did they transmit. The third observation indicates the number of leftover bits of packets that each source vehicle needs to send (e.g., initially, the number of leftover bits correspond to the packets sizes). The fourth observation element includes the arrival time and the deadline of slice 2 packets.",
392
+ "2) The Action Space: IoVRA is solved in an online fashion where at each slot, gNB makes a decision that includes (i) the broadcast coverage range selection (ii) the slice selection (iii) the RB allocation, and (iv) the power allocation. For (i), we define a discrete set of coverage distances (including zero). Thus, if gNB chooses a coverage distance (or 0), then it will broadcast (or does not) to all destination vehicles within the chosen coverage circle having as radius the indicated range. For (ii), we define a discrete set of packets (including the empty set) that indicates which packet gNB will decide to transmit. At each slot, each source vehicle has three possible choices: it does not transmit, it transmits a slice 1 packet, or it transmits a slice 2 packet. For (iii), the RB allocation is about choosing the frequency slot to be used in the current slot. For (iv), gNB carefully chooses the transmission power per RB. Note that continuous power allocation makes the implementation of DQL more complex and thus, to keep things simple, we use a discrete set of power levels that gNB can use. Finally, the action space of gNB is given by the Cartesian product of these four discrete sets.",
393
+ "3) The Reward Signal: We mainly focus on maximizing the packet reception ratio (PRR) [20] in IoV broadcast networks. PRR is defined in as follows: for one packet and one source vehicle, the PRR is given by the percentage of vehicles with"
394
+ ],
395
+ "bbox": [
396
+ 503,
397
+ 340,
398
+ 921,
399
+ 946
400
+ ],
401
+ "page_idx": 2
402
+ },
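Item 2 above describes the per-vehicle action as one element of the Cartesian product of four discrete sets. A sketch of that product, using the coverage and power values quoted later in Section III (the frequency-slot count F = 2 also comes from there):

```python
from itertools import product

# Per-vehicle action = (coverage, slice, frequency slot, power level).
# Coverage and power values are the ones quoted in Section III.
coverages = [0, 100, 400, 1000, 1400]        # metres; 0 = no broadcast
slices    = [None, "pkt_n", "pkt_s"]         # no packet, slice 1, slice 2
freqs     = [0, 1]                           # F = 2 frequency slots
powers    = [-100, 15, 23, 30]               # dBm; -100 = no transmission

actions = list(product(coverages, slices, freqs, powers))
print(len(actions))  # 5 * 3 * 2 * 4 = 120 actions per source vehicle
```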
403
+ {
404
+ "type": "page_number",
405
+ "text": "3",
406
+ "bbox": [
407
+ 911,
408
+ 31,
409
+ 919,
410
+ 39
411
+ ],
412
+ "page_idx": 2
413
+ },
414
+ {
415
+ "type": "image",
416
+ "img_path": "images/b03b659301edbca6f3a398d4ab61bbb85953f2912a1d5ac8981581c69bbdd50e.jpg",
417
+ "image_caption": [
418
+ "Fig. 2: IoV-based DRL architecture."
419
+ ],
420
+ "image_footnote": [],
421
+ "bbox": [
422
+ 94,
423
+ 66,
424
+ 903,
425
+ 460
426
+ ],
427
+ "page_idx": 3
428
+ },
429
+ {
430
+ "type": "text",
431
+ "text": "successful reception among the total number of receptions. PRR directly relates to the number of successfully received packets. Therefore, our main goal is to maximize the later.",
432
+ "bbox": [
433
+ 73,
434
+ 511,
435
+ 491,
436
+ 555
437
+ ],
438
+ "page_idx": 3
439
+ },
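The PRR definition above translates directly into code; the numbers in the usage line are illustrative only.

```python
def prr(successes: int, receivers: int) -> float:
    """Packet reception ratio for one packet of one source vehicle:
    fraction of addressed destination vehicles that decoded it."""
    return successes / receivers if receivers else 0.0

print(prr(3, 4))  # e.g. 3 of 4 addressed vehicles succeed -> 0.75
```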
440
+ {
441
+ "type": "text",
442
+ "text": "The reward signal at any slot is the sum of individual rewards of each source vehicle. Hence, the reward signal depends on whether each source vehicle has successfully transmitted its packet or not. Technically, since we aim to maximize the number of successfully received packets, we set the reward to one once a packet is successfully delivered and zero otherwise. However, this leads to poor design since the zero individual reward leads to no useful information for learning. Thus, we build the individual reward design based on the following. When a packet is not successfully delivered or the delivery has not been completed yet, the individual reward is set to the normalized achievable rate between the corresponding vehicles. The normalization is used to upper-bound the reward. When the packet is successfully delivered, the individual reward is set to the chosen upper-bound. In the first case, upper-bounding the individual reward helps gNB acquire useful information for future decisions whereas in the second case, choosing the individual reward to be the upper-bound teaches gNB the best possible decisions to take in the future and helps in maximizing the number of successfully delivered packets. The achievable data rate is calculated based on the signal to interference-plus-noise ratio (sirr) according to uplink NOMA. The overall reward signal that gNB receives is thus the sum of individual rewards of each source vehicle. The goal of DQL is to maximize the cumulative reward over",
443
+ "bbox": [
444
+ 73,
445
+ 556,
446
+ 491,
447
+ 933
448
+ ],
449
+ "page_idx": 3
450
+ },
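A compact sketch of the per-vehicle reward rule just described, taking the normalization upper bound to be 1.0 for illustration (the paper does not state the constant):

```python
def individual_reward(delivered: bool, rate: float, rate_max: float) -> float:
    # In flight: normalized achievable rate (upper-bounded by 1.0).
    # Fully delivered: the upper bound itself. A bound of 1.0 is assumed.
    return 1.0 if delivered else min(rate / rate_max, 1.0)

# gNB's reward signal: sum of the individual rewards of the source vehicles.
print(individual_reward(False, 0.4, 2.0) + individual_reward(True, 0.0, 2.0))
```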
451
+ {
452
+ "type": "text",
453
+ "text": "the long-run, given some initial state of the IoV environment. This cumulative reward is the sum over many time steps of the weighted rewards where the weight is proportional to some constant called the discount factor. This discount factor makes future rewards more important for gNB agent as their corresponding weight becomes larger. In IoVRA problem, since the proposed MDP model consists of episodes of finite length, i.e., each episode lasts a finite number of slots, IoVRA belongs to the finite horizon set of problems [21]. Further, since we aim to maximize the number of successfully delivered packets, then the MEC-based gNB agent can simply choose the discount factor to be one or a number that is close to one in order to accumulate higher rewards and thus a higher number of successfully delivered packets.",
454
+ "bbox": [
455
+ 501,
456
+ 510,
457
+ 921,
458
+ 720
459
+ ],
460
+ "page_idx": 3
461
+ },
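In symbols, with per-slot reward r_t, episode length T, and discount factor gamma chosen close to 1 as described, the return the gNB agent maximizes is the standard finite-horizon discounted sum:

```latex
G_t \;=\; \sum_{k=0}^{T-t-1} \gamma^{k}\, r_{t+k+1}, \qquad 0 < \gamma \le 1,\ \gamma \approx 1 .
```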
462
+ {
463
+ "type": "list",
464
+ "sub_type": "text",
465
+ "list_items": [
466
+ "4) The Probability Transition: The probability of moving to the next state while being in an old state and taking some action depends on the highly dynamic IoV environment and cannot be explicitly calculated. This transition happens due to the channel coefficients variation and vehicles mobility.",
467
+ "5) Training in DQL: The DQL algorithm is composed of two parts: training and inference. The training is composed of several episodes where each episode spans the number of slots. DQL uses DNNs to approximate the Q function. We leverage DQL with prioritized replay memory andueling. In general experience replay memory helps to remember and use past experiences. Standard replay memory is used to sample experience transitions uniformly without paying attention to the significance of the sampled experiences. Prioritized expe"
468
+ ],
469
+ "bbox": [
470
+ 501,
471
+ 722,
472
+ 921,
473
+ 934
474
+ ],
475
+ "page_idx": 3
476
+ },
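A minimal sketch of the proportional prioritized replay buffer referred to in item 5; the priority floor of 1e-6 is a common convention, not a value from the paper. Dueling, the second ingredient, splits the estimate as Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), i.e., two estimator heads on a shared trunk.

```python
import random

class PrioritizedReplay:
    """Proportional prioritized replay: sample transitions with probability
    proportional to priority; refresh priorities from the training loss."""
    def __init__(self):
        self.data, self.prio = [], []

    def add(self, transition, priority=1.0):
        self.data.append(transition)
        self.prio.append(priority)

    def sample(self, k):
        idx = random.choices(range(len(self.data)), weights=self.prio, k=k)
        return idx, [self.data[i] for i in idx]

    def update(self, idx, losses):
        for i, loss in zip(idx, losses):
            self.prio[i] = abs(loss) + 1e-6   # keep every priority positive
```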
477
+ {
478
+ "type": "page_number",
479
+ "text": "4",
480
+ "bbox": [
481
+ 911,
482
+ 30,
483
+ 919,
484
+ 40
485
+ ],
486
+ "page_idx": 3
487
+ },
488
+ {
489
+ "type": "text",
490
+ "text": "rience replay memory is proposed to pay more attention to important experiences. This indeed makes the learning better. Also,ueling is proposed as a new neural network architecture that represents two estimators for the Q function.",
491
+ "bbox": [
492
+ 73,
493
+ 69,
494
+ 491,
495
+ 128
496
+ ],
497
+ "page_idx": 4
498
+ },
499
+ {
500
+ "type": "text",
501
+ "text": "In detail, the training lasts a number of episodes and requires as input the IoV environment which includes the vehicles, the channel coefficients, the packet requirements, the available RBs and any other relevant IoV network parameter. It returns as output the trained DQN. The first step in DQL is to start the simulator which generates the vehicles and all network parameters, then it initializes the DQN hyperparameters. In the beginning of the first slot, the initial state of the IoV environment (initial distances of the vehicles, etc.) is revealed to gNB. Next, DQL iterates the episodes. For each episode, the environment is built by (i) updating the network parameters, e.g., the leftover bits of each source vehicle are updated based on the previous episodes, and (ii) moving the vehicles according to the mobility model. Next, the exploration rate $\\epsilon$ is annealed based on the episode index. Annealing the exploration rate over time is a technique used in RL to solve the dilemma between exploration and exploitation, i.e., as the time goes by, we decrease $\\epsilon$ to increase the exploitation probability as the agent starts to learn something useful. After a few episodes, the value of $\\epsilon$ is no longer decreased. Then, gNB chooses for each source vehicle an action that is a tuple of the coverage distance, the packet, the frequency slot, and the power level. Once gNB agent chooses its action according to the annealed $\\epsilon$ , it calculates the reward signal. Specifically, a destination vehicle calculates the received sinr, finds the number of bits a source vehicle is transmitting, and communicates this information to gNB using feedback channels. The environment moves to the next state and gNB adds to its prioritized replay memory the actual experience with some associated priority, i.e., the obtained tuple (state, action, reward, next state) is associated some priority. Initially, gNB assigns random priorities to its experiences but the priorities change as it starts to learn and updates its DQN parameters. gNB samples a mini-batch from its prioritized replay memory according to their priorities that forms a dataset used to train the DQN. gNB uses a variant of the well-known stochastic gradient descent to minimize the loss and it updates the priorities of the sampled experiences proportionally to the value of the loss. Finally, once in a while, the trained DQN is copied into the target DQN.",
502
+ "bbox": [
503
+ 73,
504
+ 128,
505
+ 491,
506
+ 734
507
+ ],
508
+ "page_idx": 4
509
+ },
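The annealing schedule described above, assuming a linear decay (the paper says only that epsilon starts at 1 and reaches 0.02 after 80% of the 3000 training episodes; the linear shape is my assumption):

```python
# Linear annealing from 1.0 to 0.02 over the first 80% of 3000 episodes,
# then frozen, matching the schedule described in the text.
EPISODES, EPS_START, EPS_END = 3000, 1.0, 0.02
ANNEAL = int(0.8 * EPISODES)

def epsilon(episode: int) -> float:
    if episode >= ANNEAL:
        return EPS_END
    return EPS_START - (EPS_START - EPS_END) * episode / ANNEAL

print(epsilon(0), round(epsilon(1200), 2), epsilon(2400))  # 1.0 0.51 0.02
```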
510
+ {
511
+ "type": "text",
512
+ "text": "6) Implementing DQL: The inference of DQL is as follows (see Fig. 2). First, the trained DQN is loaded. Also, the annealed $\\epsilon$ is loaded from the last training episode (the index of the episode is also revealed). Then, for each episode (which represents a new random channel realization), the environment is reset and built—initializing the network parameters and the transmission behaviors of each agent. Next, for each slot, gNB agent, after observing the environment, chooses the best action according to its trained DQN after feedback communication between itself and the destination vehicles. Then, the reward signal is obtained, and the next episode starts with a new random channel realization.",
513
+ "bbox": [
514
+ 73,
515
+ 733,
516
+ 491,
517
+ 912
518
+ ],
519
+ "page_idx": 4
520
+ },
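Inference then reduces to a greedy argmax over the trained DQN's outputs at each slot; `q_values` below stands in for the network's output vector, which the paper does not spell out.

```python
import numpy as np

# Greedy inference: at each slot pick the joint action with the largest
# Q-value. q_values stands in for the trained DQN's output for the state.
q_values = np.array([0.2, 1.3, 0.7, 0.9])
best_action = int(np.argmax(q_values))
print(best_action)  # 1
```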
521
+ {
522
+ "type": "text",
523
+ "text": "The inference in DQL is done in an online fashion. That is, it is executed in each slot without knowing the future",
524
+ "bbox": [
525
+ 75,
526
+ 914,
527
+ 491,
528
+ 945
529
+ ],
530
+ "page_idx": 4
531
+ },
532
+ {
533
+ "type": "text",
534
+ "text": "observations. The training in DQL is the most computationally intensive task. It is executed for a large number of episodes and can be done in an offline manner with different channel conditions and IoV network topologies. Note that training in DQL needs to be re-executed only when the topology of the IoV network undergoes significant changes, depending on the IoV network dynamics.",
535
+ "bbox": [
536
+ 501,
537
+ 69,
538
+ 921,
539
+ 175
540
+ ],
541
+ "page_idx": 4
542
+ },
543
+ {
544
+ "type": "text",
545
+ "text": "III. PERFORMANCE EVALUATION",
546
+ "text_level": 1,
547
+ "bbox": [
548
+ 604,
549
+ 191,
550
+ 821,
551
+ 205
552
+ ],
553
+ "page_idx": 4
554
+ },
555
+ {
556
+ "type": "text",
557
+ "text": "In this section, we validate the proposed DQL method. The simulation setup is based on the highway scenario of [20] and most simulation parameters are taken from [22, 23]. We consider a six-lane highway with a total length of $2\\mathrm{km}$ where each lane has a width of $4\\mathrm{m}$ . There are three lanes for the forward direction (vehicles move from right to left) and three lanes for the backward direction. The source and destination vehicles are generated according to spatial Poisson process. Vehicles' speed determine the vehicle density and the average inter-vehicle distance (in the same lane) is $2.5\\mathrm{s} \\times \\nu$ where $\\nu$ is the vehicle absolute speed. The speed of a vehicle depends on its lane: the $i$ th forward lane (from top to bottom with $i \\in \\{1, 2, 3\\}$ ) is characterized by the speed of $60 + 2(i - 1) \\times 10\\mathrm{km/h}$ , whereas the $i$ th backward lane (from top to bottom with $i \\in \\{1, 2, 3\\}$ ) is characterized by the speed of $100 - 2(i - 1) \\times 10\\mathrm{km/h}$ . The number of source vehicles $m$ and destination vehicles $n$ is randomly chosen. The important simulation parameters are given as follows [22, 23]. The carrier frequency is $2\\mathrm{GHz}$ , the per-RB bandwidth is $1\\mathrm{MHz}$ , the vehicle antenna height is $1.5\\mathrm{m}$ , the vehicle antenna gain is $3\\mathrm{dBi}$ , the vehicle receiver noise figure is $9\\mathrm{dB}$ , the shadowing distribution is log-normal, the fast fading is Rayleigh, the pathloss model is LOS in WINNER + B1, the shadowing standard deviation is $3\\mathrm{dB}$ , and the noise power $N_0$ is $-114\\mathrm{dBm}$ .",
558
+ "bbox": [
559
+ 501,
560
+ 210,
561
+ 921,
562
+ 571
563
+ ],
564
+ "page_idx": 4
565
+ },
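A sketch of the highway drop described above, reading the 2.5 s x v rule as the mean spacing of a per-lane 1-D Poisson process; the seed and the Poisson-count construction are my assumptions.

```python
import numpy as np

rng = np.random.default_rng(0)

# Highway drop from the text: 6 lanes of 4 m on a 2 km stretch; forward lane
# speeds 60/80/100 km/h, backward lane speeds 100/80/60 km/h.
road_len = 2000.0                                   # metres
speeds_f = [(60 + 20 * i) / 3.6 for i in range(3)]  # forward lanes, m/s
speeds_b = [(100 - 20 * i) / 3.6 for i in range(3)] # backward lanes, m/s

vehicles = []
for lane, v in enumerate(speeds_f + speeds_b):
    spacing = 2.5 * v                    # mean inter-vehicle distance, 2.5 s x v
    n = rng.poisson(road_len / spacing)  # Poisson count for this lane
    xs = rng.uniform(0.0, road_len, size=n)
    vehicles += [(x, lane) for x in xs]
print(len(vehicles), "vehicles dropped")
```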
566
+ {
567
+ "type": "text",
568
+ "text": "Unless specified otherwise, the slice 1 packet's size is randomly chosen in $\\{0.1..1\\}$ Mb. The slice 2 packet's size is 600 bytes. gNB chooses a coverage (in m) from the set $\\{100,400,1000,1400\\} \\cup \\{0\\}$ . The power levels (in dBm) are given by $\\{15,23,30\\} \\cup \\{-100\\}$ where $-100$ dBm is used to indicate no transmission. We set $m = 3$ , $n = 4$ , $F = 2$ , and $T = 20$ ; each slot has duration $5\\mathrm{ms}$ . The DQN is trained in the Julia programming language using Flux.jl. The DQN consists of an input and an output layer and of three fully connected hidden layers containing respectively 256, 128, and 120 neurons. The ReLu activation function is used in each layer. The ADAM optimizer with a learning rate of $10^{-5}$ is used. The training lasts 3000 episodes with an exploration rate starting from 1 and annealed to reach 0.02 for the $80\\%$ of the episodes.",
569
+ "bbox": [
570
+ 503,
571
+ 573,
572
+ 921,
573
+ 797
574
+ ],
575
+ "page_idx": 4
576
+ },
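The paper trains the DQN in Julia with Flux.jl; the equivalent architecture in PyTorch, for readers who prefer Python, is sketched below. The input and output sizes are hypothetical, as the paper does not state them.

```python
import torch
import torch.nn as nn

# Hidden layers 256 -> 128 -> 120 with ReLU, ADAM at lr = 1e-5, per the text.
# STATE_DIM and N_ACTIONS are hypothetical; 120 outputs would match the
# 5 * 3 * 2 * 4 = 120 per-vehicle actions counted earlier, though the paper
# does not state the output size.
STATE_DIM, N_ACTIONS = 32, 120

dqn = nn.Sequential(
    nn.Linear(STATE_DIM, 256), nn.ReLU(),
    nn.Linear(256, 128), nn.ReLU(),
    nn.Linear(128, 120), nn.ReLU(),
    nn.Linear(120, N_ACTIONS),
)
optimizer = torch.optim.Adam(dqn.parameters(), lr=1e-5)
```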
577
+ {
578
+ "type": "text",
579
+ "text": "To the best of our knowledge, there are no current research works that solve IoVRA while considering the slice selection, the broadcast coverage selection, the RBs and the power allocation. We implement three benchmarks: two are based on NOMA and one is based on OMA. The partial idea of all benchmarks comes from [24] which is based on the swap matching algorithm. All benchmarks are centralized in the edge and offline. They are called OMA-MP, NOMA-MP, and NOMA-RP. In OMA-MP, every RB is used by at most one",
580
+ "bbox": [
581
+ 503,
582
+ 799,
583
+ 921,
584
+ 936
585
+ ],
586
+ "page_idx": 4
587
+ },
588
+ {
589
+ "type": "page_number",
590
+ "text": "5",
591
+ "bbox": [
592
+ 911,
593
+ 30,
594
+ 919,
595
+ 39
596
+ ],
597
+ "page_idx": 4
598
+ },
599
+ {
600
+ "type": "text",
601
+ "text": "vehicle and the maximum transmission power is allocated. In NOMA-MP and NOMA-RP, every RB can be shared, and the maximum transmission power or a random transmission power are allocated, respectively. The coverage and slice selections are decided randomly at the beginning of each slot. The allocation of the RBs to the vehicles is done similarly in all benchmarks. First, an initial RB allocation is executed that gives the highest sum of channel power gain between a source vehicle and its destination vehicle. Once the initial allocation is obtained, a swap matching is performed to improve the number of packets successfully received. If no swap improves the matching, then the algorithm terminates.",
602
+ "bbox": [
603
+ 73,
604
+ 69,
605
+ 491,
606
+ 250
607
+ ],
608
+ "page_idx": 5
609
+ },
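A sketch of the swap-matching refinement used by all three benchmarks: starting from the initial allocation, keep swapping the RBs of two vehicles whenever the swap improves the objective, and stop when no swap helps. The `objective` callable is a hypothetical stand-in for the number of successfully received packets.

```python
def swap_matching(assign, objective):
    """assign maps vehicle -> RB; greedily accept any pairwise RB swap that
    improves objective(assign), terminating when no single swap helps."""
    vehicles = list(assign)
    best = objective(assign)
    improved = True
    while improved:
        improved = False
        for i in range(len(vehicles)):
            for j in range(i + 1, len(vehicles)):
                a, b = vehicles[i], vehicles[j]
                assign[a], assign[b] = assign[b], assign[a]  # try the swap
                score = objective(assign)
                if score > best:
                    best, improved = score, True
                else:
                    assign[a], assign[b] = assign[b], assign[a]  # undo
    return assign
```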
610
+ {
611
+ "type": "text",
612
+ "text": "In the simulation results, we present two performance metrics: the cumulative rewards for training the DQL and the number of successfully received packets for the inferring DQL. In the training, the reward signal received by gNB is given by the sum of the individual rewards of each source vehicle. The individual reward is equal either to (i) the upper-bounded achievable rate or to (ii) the upper bound. The event (i) happens when a packet is not yet delivered whereas the event (ii) happens when a packet is completely and successfully delivered. In the inference, the reward signal is simply given as the total number of successfully delivered packets.",
613
+ "bbox": [
614
+ 73,
615
+ 250,
616
+ 491,
617
+ 416
618
+ ],
619
+ "page_idx": 5
620
+ },
621
+ {
622
+ "type": "image",
623
+ "img_path": "images/4a584faf949e627ad0d602280645013c4048e5ef0d009b1c2c52d2a70cffaeba.jpg",
624
+ "image_caption": [
625
+ "Fig. 3: Training rewards."
626
+ ],
627
+ "image_footnote": [],
628
+ "bbox": [
629
+ 94,
630
+ 425,
631
+ 465,
632
+ 655
633
+ ],
634
+ "page_idx": 5
635
+ },
636
+ {
637
+ "type": "text",
638
+ "text": "Fig. 3 illustrates the convergence of the proposed DQL algorithm versus training episodes. The figure shows the cumulative average rewards per episode where the average is taken over the last 200 episodes. It is clear that the average reward improves as the training episodes increase. This shows the effectiveness of the proposed algorithm. The training in DQL gradually converges starting from the episode number $\\approx 2700$ . Note that the convergence of the algorithm is not smooth and contains some fluctuations which is due mainly to the high mobility nature of the IoV environment. Based on Fig. 3, DQN is trained for 3000 episodes to provide some convergence guarantees.",
639
+ "bbox": [
640
+ 73,
641
+ 688,
642
+ 491,
643
+ 868
644
+ ],
645
+ "page_idx": 5
646
+ },
647
+ {
648
+ "type": "text",
649
+ "text": "In the next two figures, we present, as a performance metric, the reward obtained in the inference part of DQL, which is the number of successfully received packets. We show this performance metric as stacked bars where each bar is divided into two parts: the lower part indicates the number",
650
+ "bbox": [
651
+ 73,
652
+ 869,
653
+ 491,
654
+ 946
655
+ ],
656
+ "page_idx": 5
657
+ },
658
+ {
659
+ "type": "text",
660
+ "text": "of successfully delivered slice 1 packets and the higher part indicates the number of successfully delivered slice 2 packets.",
661
+ "bbox": [
662
+ 503,
663
+ 68,
664
+ 921,
665
+ 99
666
+ ],
667
+ "page_idx": 5
668
+ },
669
+ {
670
+ "type": "image",
671
+ "img_path": "images/be8dbc8dea61666a9a56204e6d7fa6441378d4f53a78d6acadef9af92e45ad59.jpg",
672
+ "image_caption": [
673
+ "Fig. 4: Impact of safety message sizes"
674
+ ],
675
+ "image_footnote": [],
676
+ "bbox": [
677
+ 519,
678
+ 112,
679
+ 903,
680
+ 371
681
+ ],
682
+ "page_idx": 5
683
+ },
684
+ {
685
+ "type": "text",
686
+ "text": "Fig. 4 shows the performance of DQL against the benchmarks when varying the slice 2 packet sizes. We can see that DQL succeeds in delivering more packets without having the full and future CSI as in the benchmarks. For example, DQL can, on average, deliver successfully almost 9 packets. However, other benchmarks can only deliver, on average, almost 6 packets. NOMA-RP achieves the lowest performance as expected. Further, DQL achieves a higher number of successfully delivered slice 2 packets. This is particularly important in IoV communication as slice 2 packets are mainly safety packets and thus must have a higher priority of being delivered.",
687
+ "bbox": [
688
+ 501,
689
+ 414,
690
+ 921,
691
+ 594
692
+ ],
693
+ "page_idx": 5
694
+ },
695
+ {
696
+ "type": "image",
697
+ "img_path": "images/f88b3b69828cb5b565fc164001ae017e0d188451f7e3f4d6d499786576fe2207.jpg",
698
+ "image_caption": [
699
+ "Fig. 5: Impact of safety message deadlines"
700
+ ],
701
+ "image_footnote": [],
702
+ "bbox": [
703
+ 521,
704
+ 606,
705
+ 901,
706
+ 864
707
+ ],
708
+ "page_idx": 5
709
+ },
710
+ {
711
+ "type": "text",
712
+ "text": "Fig. 5 shows the performance of DQL against the benchmarks when varying the slice 2 packets deadlines. DQL",
713
+ "bbox": [
714
+ 504,
715
+ 910,
716
+ 921,
717
+ 941
718
+ ],
719
+ "page_idx": 5
720
+ },
721
+ {
722
+ "type": "page_number",
723
+ "text": "6",
724
+ "bbox": [
725
+ 911,
726
+ 31,
727
+ 919,
728
+ 39
729
+ ],
730
+ "page_idx": 5
731
+ },
732
+ {
733
+ "type": "text",
734
+ "text": "still achieves the best performance when the deadline of the safety packets increases. The gap between DQL and other benchmarks widens further as the deadline increases. We further notice that NOMA-RP has the worst performance for all algorithms which shows the need of a suitable power allocation method in IoVRA.",
735
+ "bbox": [
736
+ 73,
737
+ 69,
738
+ 491,
739
+ 157
740
+ ],
741
+ "page_idx": 6
742
+ },
743
+ {
744
+ "type": "text",
745
+ "text": "We notice from both Fig. 4 and Fig. 5 that there is an unfair allocation of resources between the packets of the two slices. This is mainly due to highly dynamic nature of the IoV network (e.g., vehicle positions, their speeds, etc.). For example, if a source vehicle is located close to a destination vehicle, then the quality of the wireless link between both vehicles will likely be good. Thus, gNB learns through DQL to equally likely transmit both packets. However, in the case where the source vehicle is located far away from the corresponding destination vehicle, the quality of the wireless link between both parties will probably be poor and thus, gNB will likely learn through DQL to transmit only slice 2 packets to guarantee a successful V2V communication (since slice 2 packets might not require a large number of RBs compared to slice 1 packets). It is thus important to study the fairness among different slices in such IoV network, which will be investigated in our future works.",
746
+ "bbox": [
747
+ 75,
748
+ 159,
749
+ 493,
750
+ 417
751
+ ],
752
+ "page_idx": 6
753
+ },
754
+ {
755
+ "type": "text",
756
+ "text": "IV. CONCLUSIONS AND FUTURE WORKS",
757
+ "text_level": 1,
758
+ "bbox": [
759
+ 151,
760
+ 433,
761
+ 413,
762
+ 446
763
+ ],
764
+ "page_idx": 6
765
+ },
766
+ {
767
+ "type": "text",
768
+ "text": "In this paper, we developed an online MEC-based scheme to solve the slice selection, coverage selection, resource block and non-orthogonal multiple access power allocation problem in the Internet of vehicles network. We modelled the problem as a single agent Markov decision process and developed a DQL algorithm. The proposed DQL algorithm is proven robust and effective against various system parameters including the high mobility characteristics of IoV networks. It also outperformed some baseline benchmark algorithms that are based on global and offline decisions. In future works, we will investigate a two-time scale DRL approach that decides for coverage and slice selection on a slower time scale. Further, we will study the fairness of multiple slices. Finally, we will extend our system model to include mmWave communications.",
769
+ "bbox": [
770
+ 73,
771
+ 452,
772
+ 493,
773
+ 662
774
+ ],
775
+ "page_idx": 6
776
+ },
777
+ {
778
+ "type": "text",
779
+ "text": "V. ACKNOWLEDGMENT",
780
+ "text_level": 1,
781
+ "bbox": [
782
+ 205,
783
+ 681,
784
+ 361,
785
+ 694
786
+ ],
787
+ "page_idx": 6
788
+ },
789
+ {
790
+ "type": "text",
791
+ "text": "The authors would like to thank the Natural Sciences and Engineering Research Council of Canada (NSERC) and the Fonds de recherche du Quebec - Nature et technologies (FRQNT), for the financial support of this research.",
792
+ "bbox": [
793
+ 73,
794
+ 699,
795
+ 491,
796
+ 761
797
+ ],
798
+ "page_idx": 6
799
+ },
800
+ {
801
+ "type": "text",
802
+ "text": "REFERENCES",
803
+ "text_level": 1,
804
+ "bbox": [
805
+ 240,
806
+ 777,
807
+ 326,
808
+ 789
809
+ ],
810
+ "page_idx": 6
811
+ },
812
+ {
813
+ "type": "list",
814
+ "sub_type": "ref_text",
815
+ "list_items": [
816
+ "[1] A. Triwinarko, I. Dayoub, and S. Cherkaoui, \"Phy layer enhancements for next generation v2x communication,\" *Vehicular Communications*, vol. 32, p. 100385, 2021.",
817
+ "[2] A. Alalewi, I. Dayoub, and S. Cherkaoui, \"On 5g-v2x use cases and enabling technologies: a comprehensive survey,\" IEEE Access, 2021.",
818
+ "[3] R. Soua, I. Turcanu, F. Adamsky, D. Führer, and T. Engel, \"Multi-Access Edge Computing for Vehicular Networks: A Position Paper,\" in Proc. IEEE Globecom Workshops (GC Wkshps), 2018, pp. 1-6.",
819
+ "[4] Z. Mika and S. Cherkaoui, “Network slicing for vehicular communications: a multi-agent deep reinforcement learning approach,” Annals of Telecommunications, vol. 76, no. 9, pp. 665–683, 2021."
820
+ ],
821
+ "bbox": [
822
+ 84,
823
+ 797,
824
+ 491,
825
+ 922
826
+ ],
827
+ "page_idx": 6
828
+ },
829
+ {
830
+ "type": "list",
831
+ "sub_type": "ref_text",
832
+ "list_items": [
833
+ "[5] C. Campolo, A. Molinaro, A. Iera, R. R. Fontes, and C. E. Rothenberg, \"Towards 5G Network Slicing for the V2X Ecosystem,\" in Proc. IEEE Conf. on Netw. Softwarization and Workshops (NetSoft), 2018, pp. 400-405.",
834
+ "[6] H. Khan, P. Luoto, S. Samarakoon, M. Bennis, and M. Latva-Aho, \"Network Slicing for Vehicular Communication,\" Transactions on Emerging Telecommunications Technologies, p. e3652, e3652 ett.3652. [Online]. Available: https://onlinelibrary.wiley.com/doi/abs/10.1002/ett.3652",
835
+ "[7] M. Azizian, S. Cherkaoui, and A. S. Hafid, \"Vehicle software updates distribution with sdn and cloud computing,\" IEEE Communications Magazine, vol. 55, no. 8, pp. 74-79, 2017.",
836
+ "[8] L. Liang, H. Ye, G. Yu, and G. Y. Li, “Deep-Learning-Based Wireless Resource Allocation With Application to Vehicular Networks,” Proc. IEEE, vol. 108, no. 2, pp. 341–356, 2020.",
837
+ "[9] A. Abouaomar, S. Cherkaoui, Z. Mlika, and A. Kobbane, \"Service function chaining in mec: A mean-field game and reinforcement learning approach,\" arXiv preprint arXiv:2105.04701, 2021.",
838
+ "[10] 3GPP, \"Study on NR Vehicle-to-Everything (V2X),\" 3rd Generation Partnership Project (3GPP), Technical Report (TR) 38.885, 03 2019, version 16.0.0. [Online]. Available: https://portal.3gpp.org/Desktopmodules/Specifications/SpecificationDetails.aspx?specifiedId=3497",
839
+ "[11] B. Di, L. Song, Y. Li, and Z. Han, \"V2X Meets NOMA: Non-Orthogonal Multiple Access for 5G-Enabled Vehicular Networks,\" IEEE Wireless Commun., vol. 24, no. 6, pp. 14-21, 2017.",
840
+ "[12] 5GCAR, \"Final Design and Evaluation of the 5G V2X System Level Architecture and Security Framework,\" The 5G Infrastructure Public Private Partnership (5GPPP), Deliverable D4.2, 11 2019, version 1.1. [Online]. Available: https://ec.europa.eu/research/participants/documents/downloadPublic?documentIds=080166e5c9d36fbc&appId=PPGMS",
841
+ "[13] S. A. Ashraf, R. Blasco, H. Do, G. Fodor, C. Zhang, and W. Sun, \"Supporting Vehicle-to-Everything Services by 5G New Radio Release-16 Systems,\" IEEE Commun. Standards Mag., vol. 4, no. 1, pp. 26-32, 2020.",
842
+ "[14] M. Azizian, S. Cherkoui, and A. S. Hafid, “A distributed d-hop cluster formation for vanet,” in 2016 IEEE wireless communications and networking conference. IEEE, 2016, pp. 1-6.",
843
+ "[15] ——, “A distributed cluster based transmission scheduling in vanet,” in 2016 IEEE international conference on communications (ICC). IEEE, 2016, pp. 1-6.",
844
+ "[16] ——, “Dcv: A distributed cluster formation for vanet based on end-to-end reactive mobility,” in 2016 International Wireless Communications and Mobile Computing Conference (IWCMC). IEEE, 2016, pp. 287–291.",
845
+ "[17] M. Azizian, S. Cherkaoui, and A. Hafid, \"An optimized flow allocation in vehicular cloud,\" IEEE Access, vol. 4, pp. 6766-6779, 2016.",
846
+ "[18] V. Mnih et al., \"Human-Level Control Through Deep Reinforcement Learning,\" Nature, vol. 518, no. 7540, pp. 529-533, 02 2015. [Online]. Available: http://dx.doi.org/10.1038/nature14236",
847
+ "[19] Y. S. Nasir and D. Guo, \"Multi-Agent Deep Reinforcement Learning for Dynamic Power Allocation in Wireless Networks,\" IEEE J. Sel. Areas Commun., vol. 37, no. 10, pp. 2239-2250, 2019.",
848
+ "[20] 3GPP, \"Study on Evaluation Methodology of New Vehicle-to-Everything (V2X) Use Cases for LTE and NR,\" 3rd Generation Partnership Project (3GPP), Technical Report (TR) 37.885, 06 2019, version 15.3.0. [Online]. Available: https://portal.3gpp.org/desktopmodules/ Specifications/SpecificationDetails.aspx?specificationId=3209",
849
+ "[21] M. J. Kochenderfer, C. Amato, G. Chowdhary, J. P. How, H. J. D. Reynolds, J. R. Thornton, P. A. Torres-Carrasquillo, N. K. Üre, and J. Vian, Decision Making Under Uncertainty: Theory and Application, 1st ed. The MIT Press, 2015.",
850
+ "[22] L. Liang, H. Ye, and G. Y. Li, \"Spectrum Sharing in Vehicular Networks Based on Multi-Agent Reinforcement Learning,\" IEEE J. Sel. Areas Commun., vol. 37, no. 10, pp. 2282-2292, 2019.",
851
+ "[23] L. Wang, H. Ye, L. Liang, and G. Y. Li, \"Learn to Compress CSI and Allocate Resources in Vehicular Networks,\" IEEE Trans. Commun., vol. 68, no. 6, pp. 3640-3653, 2020.",
852
+ "[24] M. Zeng, A. Yadav, O. A. Dobre, and H. V. Poor, \"Energy-Efficient Joint User-RB Association and Power Allocation for Uplink Hybrid NOMA-OMA,\" IEEE Internet Things J., vol. 6, no. 3, pp. 5119-5131, Jun. 2019."
853
+ ],
854
+ "bbox": [
855
+ 506,
856
+ 70,
857
+ 921,
858
+ 864
859
+ ],
860
+ "page_idx": 6
861
+ },
862
+ {
863
+ "type": "page_number",
864
+ "text": "7",
865
+ "bbox": [
866
+ 911,
867
+ 30,
868
+ 919,
869
+ 39
870
+ ],
871
+ "page_idx": 6
872
+ }
873
+ ]
2201.11xxx/2201.11295/8a760769-4fd1-4824-9420-1bc0f6b26c9e_model.json ADDED
@@ -0,0 +1,1193 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "page_number",
5
+ "bbox": [
6
+ 0.912,
7
+ 0.031,
8
+ 0.921,
9
+ 0.04
10
+ ],
11
+ "angle": 0,
12
+ "content": "1"
13
+ },
14
+ {
15
+ "type": "aside_text",
16
+ "bbox": [
17
+ 0.023,
18
+ 0.277,
19
+ 0.058,
20
+ 0.708
21
+ ],
22
+ "angle": 270,
23
+ "content": "arXiv:2201.11295v1 [cs.NI] 27 Jan 2022"
24
+ },
25
+ {
26
+ "type": "title",
27
+ "bbox": [
28
+ 0.193,
29
+ 0.071,
30
+ 0.804,
31
+ 0.105
32
+ ],
33
+ "angle": 0,
34
+ "content": "Network Slicing with MEC and Deep"
35
+ },
36
+ {
37
+ "type": "title",
38
+ "bbox": [
39
+ 0.082,
40
+ 0.108,
41
+ 0.917,
42
+ 0.139
43
+ ],
44
+ "angle": 0,
45
+ "content": "Reinforcement Learning for the Internet of Vehicles"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.202,
51
+ 0.148,
52
+ 0.795,
53
+ 0.165
54
+ ],
55
+ "angle": 0,
56
+ "content": "Zoubeir Mlika, Member, IEEE, and Soumaya Cherkoui, Senior Member, IEEE"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.074,
62
+ 0.221,
63
+ 0.492,
64
+ 0.323
65
+ ],
66
+ "angle": 0,
67
+ "content": "Abstract—The interconnection of vehicles in the future fifth generation (5G) wireless ecosystem forms the so-called Internet of vehicles (IoV). IoV offers new kinds of applications requiring delay-sensitive, compute-intensive and bandwidth-hungry services. Mobile edge computing (MEC) and network slicing (NS) are two of the key enabler technologies in 5G networks that can be used to optimize the allocation of the network resources and guarantee the diverse requirements of IoV applications."
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.074,
73
+ 0.322,
74
+ 0.493,
75
+ 0.512
76
+ ],
77
+ "angle": 0,
78
+ "content": "As traditional model-based optimization techniques generally end up with NP-hard and strongly non-convex and non-linear mathematical programming formulations, in this paper, we introduce a model-free approach based on deep reinforcement learning (DRL) to solve the resource allocation problem in MEC-enabled IoV network based on network slicing. Furthermore, the solution uses non-orthogonal multiple access (NOMA) to enable a better exploitation of the scarce channel resources. The considered problem addresses jointly the channel and power allocation, the slice selection and the vehicles selection (vehicles grouping). We model the problem as a single-agent Markov decision process. Then, we solve it using DRL using the well-known DQL algorithm. We show that our approach is robust and effective under different network conditions compared to benchmark solutions."
79
+ },
80
+ {
81
+ "type": "title",
82
+ "bbox": [
83
+ 0.223,
84
+ 0.537,
85
+ 0.344,
86
+ 0.55
87
+ ],
88
+ "angle": 0,
89
+ "content": "I. INTRODUCTION"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.074,
95
+ 0.555,
96
+ 0.493,
97
+ 0.875
98
+ ],
99
+ "angle": 0,
100
+ "content": "The Internet of vehicles (IoV) is an emerging concept that enhances the existing capabilities of vehicular communication by integrating with the Internet of things (IoT). IoV is a key use-case in the upcoming beyond fifth generation (5G) wireless networks [1, 2]. IoV creates diverse new applications with extremely diverse service requirements including ultrahigh reliable and delay-sensitive, bandwidth-hungry as well as compute-intensive applications [3]. For example, accident reports require ultra-reliable and extremely low latency whereas high definition map sharing require high bandwidth. An important open question in today's IoV networks is \"how to support, using a unified air interface, future IoV services while guaranteeing their extremely diverse performance requirements?\" Network slicing (NS) is a potential solution to respond to this question [4-6]. NS is a tool that enables network operators to support virtualized end-to-end networks that belongs to the principle of software defined networking [7]. It mainly allows creating different logical networks on the top of a common and programmable physical infrastructure. Another technology, namely mobile edge computing, or better known as multi-access edge computing (MEC), is considered as an"
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.075,
106
+ 0.883,
107
+ 0.492,
108
+ 0.943
109
+ ],
110
+ "angle": 0,
111
+ "content": "Zoubeir Mlika and Soumaya Cherkaoui are with the research laboratory on intelligent engineering for communications and networking (INTERLAB), Faculty of Engineering, Department of Electrical and Computer Science Engineering, University of Sherbrooke, Sherbrooke J1K 2R1, Quebec, Canada, (e-mail: zoubeir.mlika@usherbrooke.ca, soumaya.cherkaoui@usherbrooke.ca)."
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.503,
117
+ 0.22,
118
+ 0.923,
119
+ 0.31
120
+ ],
121
+ "angle": 0,
122
+ "content": "important building block in the future IoV ecosystem. The joint implementation of NS and MEC is a key enabler for IoV networks. These two technologies can be used not only to guarantee the diverse requirements of IoV applications but also to deploy the diverse vehicular services at the appropriate locations [3]."
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.503,
128
+ 0.311,
129
+ 0.923,
130
+ 0.416
131
+ ],
132
+ "angle": 0,
133
+ "content": "Optimal resource allocation in IoV would go through traditional model-based optimization techniques. Due to the complex and highly dynamic nature of IoV, such a model-based approach is not very appealing. In fact, such approach ends up with strongly non-convex optimization problems that are generally NP-hard [8]. Thus, a model-free machine learning approach is crucial."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.503,
139
+ 0.417,
140
+ 0.923,
141
+ 0.568
142
+ ],
143
+ "angle": 0,
144
+ "content": "Reinforcement learning (RL) is a useful technique in solving NP-hard optimization problems. It has been applied successfully to solve very hard problems in different research areas including wireless networks [9]. It is based on Markov decision process (MDP) modeling where agents learn to select the best actions through repeated interactions with an unknown environment by receiving numerical reward signals [8]. Deep RL (DRL) uses the strong ability of neural networks to generalize across enormous state spaces and reduce the complexity of a solution, thus improving the learning process."
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.503,
150
+ 0.568,
151
+ 0.923,
152
+ 0.734
153
+ ],
154
+ "angle": 0,
155
+ "content": "In this paper, using DRL, we propose a new solution framework to solve the challenging problem of resource allocation in a MEC-enabled IoV network. More specifically, we focus on the in-coverage scenario of 5G-new radio (5G-NR) in which vehicles communicate with each other through a base station, e.g., NodeB (gNB), that performs MEC-based tasks [10]. We focus on the broadcast communication technique. Due to the scarce spectrum resources, non-orthogonal multiple access (NOMA) is also used in our proposed framework. NOMA is a promising technique to increase the spectral efficiency in vehicular networks [11]."
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.503,
161
+ 0.734,
162
+ 0.923,
163
+ 0.945
164
+ ],
165
+ "angle": 0,
166
+ "content": "In more detail, the considered resource allocation problem, called IoV resource allocation (IoVRA), involves the allocation of four resources: the slice (deciding which packet to send), the coverage of the broadcast (deciding the range of the broadcast), the resource blocks (RBs), and the power. By carefully allocating these four resources, and by applying the successive interference cancellation (SIC) at the corresponding destination vehicles, NOMA can help in boosting the capacity of the IoV network. The use of NOMA in broadcast communications is different from the usual uplink and downlink NOMA techniques, which is due from the broadcast nature in IoV networks, i.e., two source vehicles broadcast with two distinct transmission powers to the same group of destination vehicles."
167
+ },
168
+ {
169
+ "type": "footer",
170
+ "bbox": [
171
+ 0.078,
172
+ 0.958,
173
+ 0.926,
174
+ 0.983
175
+ ],
176
+ "angle": 0,
177
+ "content": "Copyright (c) 2020 IEEE. Personal use of this material is permitted. However, permission to use this material for any other purposes must be obtained from the IEEE by sending a request to pubs-permissions@ieee.org."
178
+ }
179
+ ],
180
+ [
181
+ {
182
+ "type": "page_number",
183
+ "bbox": [
184
+ 0.912,
185
+ 0.031,
186
+ 0.921,
187
+ 0.041
188
+ ],
189
+ "angle": 0,
190
+ "content": "2"
191
+ },
192
+ {
193
+ "type": "image",
194
+ "bbox": [
195
+ 0.092,
196
+ 0.068,
197
+ 0.924,
198
+ 0.535
199
+ ],
200
+ "angle": 0,
201
+ "content": null
202
+ },
203
+ {
204
+ "type": "image_caption",
205
+ "bbox": [
206
+ 0.318,
207
+ 0.541,
208
+ 0.678,
209
+ 0.556
210
+ ],
211
+ "angle": 0,
212
+ "content": "Fig. 1: Two network slices in an IoV-based MEC network."
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.074,
218
+ 0.583,
219
+ 0.492,
220
+ 0.778
221
+ ],
222
+ "angle": 0,
223
+ "content": "Even though we propose a MEC-based IoV solution for the case of vehicle-to-vehicle (V2V) communications, our proposed system model is valid for vehicle-to-infrastructure (V2I) communications as well. Indeed, in V2I communications, a vehicle communicates with a gNB-type road side unit (RSU) or a user-type RSU through the cellular Uu or the sidelink (SL) connectivity [12]. For the case of user-type RSU communications, the coverage range selection decision will simply include the RSU. For the case of gNB-type RSU communications, the broadcast coverage range selection could be ignored and replaced by RSU association. Thus, our proposed solution framework is still valid for both V2V and V2I communications."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.074,
229
+ 0.779,
230
+ 0.492,
231
+ 0.9
232
+ ],
233
+ "angle": 0,
234
+ "content": "To the best of our knowledge, this is the first work that proposes a model-free DRL framework to solve IoVRA in MEC-enabled IoV networks based on broadcast, NS and NOMA. The contributions of our work are the following. We model IoVRA as a single agent MDP. Next, we propose a deep-Q-learning (DQL) algorithm to solve it. Finally, we show that our proposed DQL algorithm outperforms benchmark algorithms."
235
+ },
236
+ {
237
+ "type": "title",
238
+ "bbox": [
239
+ 0.505,
240
+ 0.583,
241
+ 0.62,
242
+ 0.598
243
+ ],
244
+ "angle": 0,
245
+ "content": "A. Organization"
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.504,
251
+ 0.602,
252
+ 0.922,
253
+ 0.693
254
+ ],
255
+ "angle": 0,
256
+ "content": "The article is organized as follows. Section II presents the system model, the single agent MDP, and describes the proposed DQL algorithm. Section III presents benchmark algorithmic solutions and gives the simulation results. Finally, section IV draws some conclusions and discusses interesting open research questions."
257
+ },
258
+ {
259
+ "type": "title",
260
+ "bbox": [
261
+ 0.556,
262
+ 0.71,
263
+ 0.871,
264
+ 0.724
265
+ ],
266
+ "angle": 0,
267
+ "content": "II. PROPOSED DRL FOR INTERNET OF VEHICLES"
268
+ },
269
+ {
270
+ "type": "title",
271
+ "bbox": [
272
+ 0.504,
273
+ 0.729,
274
+ 0.712,
275
+ 0.743
276
+ ],
277
+ "angle": 0,
278
+ "content": "A. Internet of Vehicles Model"
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.503,
284
+ 0.748,
285
+ 0.922,
286
+ 0.883
287
+ ],
288
+ "angle": 0,
289
+ "content": "We consider an IoV network composed of a set of source vehicles that generate packets, and a set of destination vehicles that receive packets. All vehicles operate in the in-coverage scenario of 5G-NR [10] and thus they are covered by some gNB that performs edge computing. A source vehicle uses broadcast communications to transmit to a subset of the destination vehicles. The time is slotted into a set of slots. The total bandwidth is divided into a set of frequency slots. A resource block (RB) is given by the pair (frequency, slot)."
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.504,
295
+ 0.885,
296
+ 0.922,
297
+ 0.945
298
+ ],
299
+ "angle": 0,
300
+ "content": "The proposed system model supports several use cases, including advanced driving with trajectory sharing, extended sensors [13] and is valid for both V2V and V2I communications. To provide guaranteed quality of service requirements"
301
+ }
302
+ ],
303
+ [
304
+ {
305
+ "type": "page_number",
306
+ "bbox": [
307
+ 0.912,
308
+ 0.032,
309
+ 0.921,
310
+ 0.04
311
+ ],
312
+ "angle": 0,
313
+ "content": "3"
314
+ },
315
+ {
316
+ "type": "text",
317
+ "bbox": [
318
+ 0.075,
319
+ 0.07,
320
+ 0.493,
321
+ 0.281
322
+ ],
323
+ "angle": 0,
324
+ "content": "to the different use cases, NS is used, which is an efficient solution in IoV networks [6]. It mainly creates logical networks on the top of a common and programmable MEC-enabled IoV infrastructure. We create two network slices. The first slice (slice 1) is designed for non-safety applications such as video streaming. The second slice (slice 2) is designed for safety applications such as emergency warnings. An example of the MEC-enabled NS system model is given in Fig. 1, where vehicles communicate with gNBs that are connected to MEC servers. On top of this network infrastructure, two network slices are created to support IoV applications. Slice 1 is designated for high throughput or enhanced mobile broadband communication (eMBB) and slice 2 is designated for ultra-reliable and low latency communication (uRLLC)."
325
+ },
326
+ {
327
+ "type": "text",
328
+ "bbox": [
329
+ 0.075,
330
+ 0.281,
331
+ 0.493,
332
+ 0.476
333
+ ],
334
+ "angle": 0,
335
+ "content": "Each source vehicle has two different packets for each slice, where slice 1's packet \\((\\mathsf{pkt}^n)\\) requires high throughput whereas slice 2's packet \\((\\mathsf{pkt}^s)\\) has stringent latency requirements. For any packet to be delivered successfully, the corresponding source vehicle requires a set of RBs such that the achievable data rates are above the minimum requirements. Packet \\(\\mathsf{pkt}^n\\) can be transmitted using any RBs from the frequency-slot resource pool with a carefully chosen transmission power per each RB. However, \\(\\mathsf{pkt}^s\\), having an arrival time and a deadline, can be transmitted using any frequency slot but only using slots between its arrival time and deadline with a carefully chosen transmission power per each RB. The wireless channel gain between two vehicles includes fast and slow fading."
336
+ },
337
+ {
338
+ "type": "text",
339
+ "bbox": [
340
+ 0.075,
341
+ 0.478,
342
+ 0.493,
343
+ 0.598
344
+ ],
345
+ "angle": 0,
346
+ "content": "A source vehicle has to decide which packet to send, at what range to broadcast, what RBs to use, and what transmission powers to allocate. The range broadcasting optimization is similar to the classical vehicle clustering [14-17]. To improve the spectral efficiency of the IoV network, we use NOMA to superimpose the transmissions of the source vehicles transmitting to some destination vehicle, which uses SIC to decode the superimposed transmissions."
347
+ },
348
+ {
349
+ "type": "title",
350
+ "bbox": [
351
+ 0.076,
352
+ 0.618,
353
+ 0.36,
354
+ 0.634
355
+ ],
356
+ "angle": 0,
357
+ "content": "B. Proposed Deep-Q-Learning Algorithm"
358
+ },
359
+ {
360
+ "type": "text",
361
+ "bbox": [
362
+ 0.075,
363
+ 0.637,
364
+ 0.493,
365
+ 0.878
366
+ ],
367
+ "angle": 0,
368
+ "content": "Vehicles operate in the coverage of gNB with MEC, that collects information about vehicles and performs pilot estimation to obtain the channel statistics. Based on the obtained feedback information, gNB observes the IoV environment and makes decisions. It plays the role of an intelligent entity in a single agent MDP. With the help of DRL, gNB learns to solve efficiently the complicated IoVRA problem. Specifically, gNB implements the well-known DQL approach [18]. DQL has mainly two parts: training and inference. In training, gNB trains a deep-Q-network (DQN), whereas in inference, it takes actions according to its trained DQN. DQL is an improvement of the so-called QL algorithm that is based on a tabular method which creates a table of state-action pairs. QL explores the action space using an exploration policy, e.g., \\(\\epsilon\\)-greedy. Despite the proven effectiveness of QL, it generally fails when the state and action spaces become large as in IoVRA."
369
+ },
370
+ {
371
+ "type": "text",
372
+ "bbox": [
373
+ 0.075,
374
+ 0.878,
375
+ 0.493,
376
+ 0.94
377
+ ],
378
+ "angle": 0,
379
+ "content": "DQL is a promising technique that is proposed to solve the curse of dimensionality in RL by approximating the Q action-value function using deep learning. One way to solve IoVRA is through multi-agent DRL by combining independent QL for"
380
+ },
381
+ {
382
+ "type": "text",
383
+ "bbox": [
384
+ 0.503,
385
+ 0.07,
386
+ 0.923,
387
+ 0.236
388
+ ],
389
+ "angle": 0,
390
+ "content": "each agent. That is, each agent tries to learn its own policy based on its own observations and actions while treating all other agents as part of the environment. This badly influences the result of the training as it creates a non-stationary environment that changes as other agents take decisions. For this reason, a MEC-enabled IoV network facilitates the training in such situation by modeling IoVRA as a single agent who performs the training at the edge of the IoV network. The system architecture of the proposed DQN approach is given in Fig. 2, in which gNB and MEC server interact with the IoV environment and take decisions accordingly."
391
+ },
392
+ {
393
+ "type": "text",
394
+ "bbox": [
395
+ 0.504,
396
+ 0.236,
397
+ 0.923,
398
+ 0.342
399
+ ],
400
+ "angle": 0,
401
+ "content": "Before describing in detail DQL, first, IoVRA is modeled as a single agent MDP given by the quadruple: state space, action space, reward function and transition probability. The agent in this MDP is the gNB, which takes an action, receives a reward and moves to the next state based on its interaction with the unknown IoV environment. This interaction helps gNB gain more experiences and improves its accumulated reward."
402
+ },
403
+ {
404
+ "type": "text",
405
+ "bbox": [
406
+ 0.504,
407
+ 0.342,
408
+ 0.923,
409
+ 0.583
410
+ ],
411
+ "angle": 0,
412
+ "content": "1) The State Space: At any slot, any state of the IoV environment is unknown directly to gNB. Instead, gNB receives an observation from the IoV environment. In our model, an observation includes local channel state information (CSI) and the transmission behavior of the source vehicles. More precisely, an observation includes the large and small-scale fading values between vehicles. These values can be accurately estimated by the destination vehicles and fed back to gNB without significant delay [19]. The observation also includes a decision variable that indicates whether the source vehicles transmitted in previous slots and if so which packet did they transmit. The third observation indicates the number of leftover bits of packets that each source vehicle needs to send (e.g., initially, the number of leftover bits correspond to the packets sizes). The fourth observation element includes the arrival time and the deadline of slice 2 packets."
413
+ },
414
+ {
415
+ "type": "text",
416
+ "bbox": [
417
+ 0.504,
418
+ 0.583,
419
+ 0.923,
420
+ 0.884
421
+ ],
422
+ "angle": 0,
423
+ "content": "2) The Action Space: IoVRA is solved in an online fashion where at each slot, gNB makes a decision that includes (i) the broadcast coverage range selection (ii) the slice selection (iii) the RB allocation, and (iv) the power allocation. For (i), we define a discrete set of coverage distances (including zero). Thus, if gNB chooses a coverage distance (or 0), then it will broadcast (or does not) to all destination vehicles within the chosen coverage circle having as radius the indicated range. For (ii), we define a discrete set of packets (including the empty set) that indicates which packet gNB will decide to transmit. At each slot, each source vehicle has three possible choices: it does not transmit, it transmits a slice 1 packet, or it transmits a slice 2 packet. For (iii), the RB allocation is about choosing the frequency slot to be used in the current slot. For (iv), gNB carefully chooses the transmission power per RB. Note that continuous power allocation makes the implementation of DQL more complex and thus, to keep things simple, we use a discrete set of power levels that gNB can use. Finally, the action space of gNB is given by the Cartesian product of these four discrete sets."
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.504,
429
+ 0.885,
430
+ 0.923,
431
+ 0.947
432
+ ],
433
+ "angle": 0,
434
+ "content": "3) The Reward Signal: We mainly focus on maximizing the packet reception ratio (PRR) [20] in IoV broadcast networks. PRR is defined in as follows: for one packet and one source vehicle, the PRR is given by the percentage of vehicles with"
435
+ },
436
+ {
437
+ "type": "list",
438
+ "bbox": [
439
+ 0.504,
440
+ 0.342,
441
+ 0.923,
442
+ 0.947
443
+ ],
444
+ "angle": 0,
445
+ "content": null
446
+ }
447
+ ],
448
+ [
449
+ {
450
+ "type": "page_number",
451
+ "bbox": [
452
+ 0.912,
453
+ 0.031,
454
+ 0.921,
455
+ 0.041
456
+ ],
457
+ "angle": 0,
458
+ "content": "4"
459
+ },
460
+ {
461
+ "type": "image",
462
+ "bbox": [
463
+ 0.096,
464
+ 0.068,
465
+ 0.904,
466
+ 0.462
467
+ ],
468
+ "angle": 0,
469
+ "content": null
470
+ },
471
+ {
472
+ "type": "image_caption",
473
+ "bbox": [
474
+ 0.386,
475
+ 0.469,
476
+ 0.611,
477
+ 0.485
478
+ ],
479
+ "angle": 0,
480
+ "content": "Fig. 2: IoV-based DRL architecture."
481
+ },
482
+ {
483
+ "type": "text",
484
+ "bbox": [
485
+ 0.074,
486
+ 0.512,
487
+ 0.492,
488
+ 0.556
489
+ ],
490
+ "angle": 0,
491
+ "content": "successful reception among the total number of receptions. PRR directly relates to the number of successfully received packets. Therefore, our main goal is to maximize the later."
492
+ },
493
+ {
494
+ "type": "text",
495
+ "bbox": [
496
+ 0.074,
497
+ 0.557,
498
+ 0.493,
499
+ 0.934
500
+ ],
501
+ "angle": 0,
502
+ "content": "The reward signal at any slot is the sum of individual rewards of each source vehicle. Hence, the reward signal depends on whether each source vehicle has successfully transmitted its packet or not. Technically, since we aim to maximize the number of successfully received packets, we set the reward to one once a packet is successfully delivered and zero otherwise. However, this leads to poor design since the zero individual reward leads to no useful information for learning. Thus, we build the individual reward design based on the following. When a packet is not successfully delivered or the delivery has not been completed yet, the individual reward is set to the normalized achievable rate between the corresponding vehicles. The normalization is used to upper-bound the reward. When the packet is successfully delivered, the individual reward is set to the chosen upper-bound. In the first case, upper-bounding the individual reward helps gNB acquire useful information for future decisions whereas in the second case, choosing the individual reward to be the upper-bound teaches gNB the best possible decisions to take in the future and helps in maximizing the number of successfully delivered packets. The achievable data rate is calculated based on the signal to interference-plus-noise ratio (sirr) according to uplink NOMA. The overall reward signal that gNB receives is thus the sum of individual rewards of each source vehicle. The goal of DQL is to maximize the cumulative reward over"
503
+ },
504
+ {
505
+ "type": "text",
506
+ "bbox": [
507
+ 0.503,
508
+ 0.511,
509
+ 0.922,
510
+ 0.722
511
+ ],
512
+ "angle": 0,
513
+ "content": "the long-run, given some initial state of the IoV environment. This cumulative reward is the sum over many time steps of the weighted rewards where the weight is proportional to some constant called the discount factor. This discount factor makes future rewards more important for gNB agent as their corresponding weight becomes larger. In IoVRA problem, since the proposed MDP model consists of episodes of finite length, i.e., each episode lasts a finite number of slots, IoVRA belongs to the finite horizon set of problems [21]. Further, since we aim to maximize the number of successfully delivered packets, then the MEC-based gNB agent can simply choose the discount factor to be one or a number that is close to one in order to accumulate higher rewards and thus a higher number of successfully delivered packets."
514
+ },
515
+ {
516
+ "type": "text",
517
+ "bbox": [
518
+ 0.504,
519
+ 0.723,
520
+ 0.922,
521
+ 0.798
522
+ ],
523
+ "angle": 0,
524
+ "content": "4) The Probability Transition: The probability of moving to the next state while being in an old state and taking some action depends on the highly dynamic IoV environment and cannot be explicitly calculated. This transition happens due to the channel coefficients variation and vehicles mobility."
525
+ },
526
+ {
527
+ "type": "text",
528
+ "bbox": [
529
+ 0.503,
530
+ 0.798,
531
+ 0.922,
532
+ 0.935
533
+ ],
534
+ "angle": 0,
535
+ "content": "5) Training in DQL: The DQL algorithm is composed of two parts: training and inference. The training is composed of several episodes where each episode spans the number of slots. DQL uses DNNs to approximate the Q function. We leverage DQL with prioritized replay memory andueling. In general experience replay memory helps to remember and use past experiences. Standard replay memory is used to sample experience transitions uniformly without paying attention to the significance of the sampled experiences. Prioritized expe"
536
+ },
537
+ {
538
+ "type": "list",
539
+ "bbox": [
540
+ 0.503,
541
+ 0.723,
542
+ 0.922,
543
+ 0.935
544
+ ],
545
+ "angle": 0,
546
+ "content": null
547
+ }
548
+ ],
549
+ [
550
+ {
551
+ "type": "page_number",
552
+ "bbox": [
553
+ 0.912,
554
+ 0.031,
555
+ 0.921,
556
+ 0.04
557
+ ],
558
+ "angle": 0,
559
+ "content": "5"
560
+ },
561
+ {
562
+ "type": "text",
563
+ "bbox": [
564
+ 0.075,
565
+ 0.07,
566
+ 0.492,
567
+ 0.129
568
+ ],
569
+ "angle": 0,
570
+ "content": "rience replay memory is proposed to pay more attention to important experiences. This indeed makes the learning better. Also,ueling is proposed as a new neural network architecture that represents two estimators for the Q function."
571
+ },
572
+ {
573
+ "type": "text",
574
+ "bbox": [
575
+ 0.075,
576
+ 0.129,
577
+ 0.493,
578
+ 0.735
579
+ ],
580
+ "angle": 0,
581
+ "content": "In detail, the training lasts a number of episodes and requires as input the IoV environment which includes the vehicles, the channel coefficients, the packet requirements, the available RBs and any other relevant IoV network parameter. It returns as output the trained DQN. The first step in DQL is to start the simulator which generates the vehicles and all network parameters, then it initializes the DQN hyperparameters. In the beginning of the first slot, the initial state of the IoV environment (initial distances of the vehicles, etc.) is revealed to gNB. Next, DQL iterates the episodes. For each episode, the environment is built by (i) updating the network parameters, e.g., the leftover bits of each source vehicle are updated based on the previous episodes, and (ii) moving the vehicles according to the mobility model. Next, the exploration rate \\(\\epsilon\\) is annealed based on the episode index. Annealing the exploration rate over time is a technique used in RL to solve the dilemma between exploration and exploitation, i.e., as the time goes by, we decrease \\(\\epsilon\\) to increase the exploitation probability as the agent starts to learn something useful. After a few episodes, the value of \\(\\epsilon\\) is no longer decreased. Then, gNB chooses for each source vehicle an action that is a tuple of the coverage distance, the packet, the frequency slot, and the power level. Once gNB agent chooses its action according to the annealed \\(\\epsilon\\), it calculates the reward signal. Specifically, a destination vehicle calculates the received sinr, finds the number of bits a source vehicle is transmitting, and communicates this information to gNB using feedback channels. The environment moves to the next state and gNB adds to its prioritized replay memory the actual experience with some associated priority, i.e., the obtained tuple (state, action, reward, next state) is associated some priority. Initially, gNB assigns random priorities to its experiences but the priorities change as it starts to learn and updates its DQN parameters. gNB samples a mini-batch from its prioritized replay memory according to their priorities that forms a dataset used to train the DQN. gNB uses a variant of the well-known stochastic gradient descent to minimize the loss and it updates the priorities of the sampled experiences proportionally to the value of the loss. Finally, once in a while, the trained DQN is copied into the target DQN."
582
+ },
583
+ {
584
+ "type": "text",
585
+ "bbox": [
586
+ 0.075,
587
+ 0.734,
588
+ 0.492,
589
+ 0.914
590
+ ],
591
+ "angle": 0,
592
+ "content": "6) Implementing DQL: The inference of DQL is as follows (see Fig. 2). First, the trained DQN is loaded. Also, the annealed \\(\\epsilon\\) is loaded from the last training episode (the index of the episode is also revealed). Then, for each episode (which represents a new random channel realization), the environment is reset and built—initializing the network parameters and the transmission behaviors of each agent. Next, for each slot, gNB agent, after observing the environment, chooses the best action according to its trained DQN after feedback communication between itself and the destination vehicles. Then, the reward signal is obtained, and the next episode starts with a new random channel realization."
593
+ },
594
+ {
595
+ "type": "text",
596
+ "bbox": [
597
+ 0.076,
598
+ 0.915,
599
+ 0.492,
600
+ 0.946
601
+ ],
602
+ "angle": 0,
603
+ "content": "The inference in DQL is done in an online fashion. That is, it is executed in each slot without knowing the future"
604
+ },
605
+ {
606
+ "type": "text",
607
+ "bbox": [
608
+ 0.503,
609
+ 0.07,
610
+ 0.923,
611
+ 0.176
612
+ ],
613
+ "angle": 0,
614
+ "content": "observations. The training in DQL is the most computationally intensive task. It is executed for a large number of episodes and can be done in an offline manner with different channel conditions and IoV network topologies. Note that training in DQL needs to be re-executed only when the topology of the IoV network undergoes significant changes, depending on the IoV network dynamics."
615
+ },
616
+ {
617
+ "type": "title",
618
+ "bbox": [
619
+ 0.606,
620
+ 0.193,
621
+ 0.822,
622
+ 0.206
623
+ ],
624
+ "angle": 0,
625
+ "content": "III. PERFORMANCE EVALUATION"
626
+ },
627
+ {
628
+ "type": "text",
629
+ "bbox": [
630
+ 0.503,
631
+ 0.211,
632
+ 0.923,
633
+ 0.573
634
+ ],
635
+ "angle": 0,
636
+ "content": "In this section, we validate the proposed DQL method. The simulation setup is based on the highway scenario of [20] and most simulation parameters are taken from [22, 23]. We consider a six-lane highway with a total length of \\(2\\mathrm{km}\\) where each lane has a width of \\(4\\mathrm{m}\\). There are three lanes for the forward direction (vehicles move from right to left) and three lanes for the backward direction. The source and destination vehicles are generated according to spatial Poisson process. Vehicles' speed determine the vehicle density and the average inter-vehicle distance (in the same lane) is \\(2.5\\mathrm{s} \\times \\nu\\) where \\(\\nu\\) is the vehicle absolute speed. The speed of a vehicle depends on its lane: the \\(i\\)th forward lane (from top to bottom with \\(i \\in \\{1, 2, 3\\}\\)) is characterized by the speed of \\(60 + 2(i - 1) \\times 10\\mathrm{km/h}\\), whereas the \\(i\\)th backward lane (from top to bottom with \\(i \\in \\{1, 2, 3\\}\\)) is characterized by the speed of \\(100 - 2(i - 1) \\times 10\\mathrm{km/h}\\). The number of source vehicles \\(m\\) and destination vehicles \\(n\\) is randomly chosen. The important simulation parameters are given as follows [22, 23]. The carrier frequency is \\(2\\mathrm{GHz}\\), the per-RB bandwidth is \\(1\\mathrm{MHz}\\), the vehicle antenna height is \\(1.5\\mathrm{m}\\), the vehicle antenna gain is \\(3\\mathrm{dBi}\\), the vehicle receiver noise figure is \\(9\\mathrm{dB}\\), the shadowing distribution is log-normal, the fast fading is Rayleigh, the pathloss model is LOS in WINNER + B1, the shadowing standard deviation is \\(3\\mathrm{dB}\\), and the noise power \\(N_0\\) is \\(-114\\mathrm{dBm}\\)."
637
+ },
638
+ {
639
+ "type": "text",
640
+ "bbox": [
641
+ 0.504,
642
+ 0.574,
643
+ 0.923,
644
+ 0.799
645
+ ],
646
+ "angle": 0,
647
+ "content": "Unless specified otherwise, the slice 1 packet's size is randomly chosen in \\(\\{0.1..1\\}\\) Mb. The slice 2 packet's size is 600 bytes. gNB chooses a coverage (in m) from the set \\(\\{100,400,1000,1400\\} \\cup \\{0\\}\\). The power levels (in dBm) are given by \\(\\{15,23,30\\} \\cup \\{-100\\}\\) where \\(-100\\) dBm is used to indicate no transmission. We set \\(m = 3\\), \\(n = 4\\), \\(F = 2\\), and \\(T = 20\\); each slot has duration \\(5\\mathrm{ms}\\). The DQN is trained in the Julia programming language using Flux.jl. The DQN consists of an input and an output layer and of three fully connected hidden layers containing respectively 256, 128, and 120 neurons. The ReLu activation function is used in each layer. The ADAM optimizer with a learning rate of \\(10^{-5}\\) is used. The training lasts 3000 episodes with an exploration rate starting from 1 and annealed to reach 0.02 for the \\(80\\%\\) of the episodes."
648
+ },
649
+ {
650
+ "type": "text",
651
+ "bbox": [
652
+ 0.504,
653
+ 0.8,
654
+ 0.923,
655
+ 0.937
656
+ ],
657
+ "angle": 0,
658
+ "content": "To the best of our knowledge, there are no current research works that solve IoVRA while considering the slice selection, the broadcast coverage selection, the RBs and the power allocation. We implement three benchmarks: two are based on NOMA and one is based on OMA. The partial idea of all benchmarks comes from [24] which is based on the swap matching algorithm. All benchmarks are centralized in the edge and offline. They are called OMA-MP, NOMA-MP, and NOMA-RP. In OMA-MP, every RB is used by at most one"
659
+ }
660
+ ],
661
+ [
662
+ {
663
+ "type": "page_number",
664
+ "bbox": [
665
+ 0.912,
666
+ 0.032,
667
+ 0.921,
668
+ 0.04
669
+ ],
670
+ "angle": 0,
671
+ "content": "6"
672
+ },
673
+ {
674
+ "type": "text",
675
+ "bbox": [
676
+ 0.074,
677
+ 0.07,
678
+ 0.493,
679
+ 0.25
680
+ ],
681
+ "angle": 0,
682
+ "content": "vehicle and the maximum transmission power is allocated. In NOMA-MP and NOMA-RP, every RB can be shared, and the maximum transmission power or a random transmission power are allocated, respectively. The coverage and slice selections are decided randomly at the beginning of each slot. The allocation of the RBs to the vehicles is done similarly in all benchmarks. First, an initial RB allocation is executed that gives the highest sum of channel power gain between a source vehicle and its destination vehicle. Once the initial allocation is obtained, a swap matching is performed to improve the number of packets successfully received. If no swap improves the matching, then the algorithm terminates."
683
+ },
684
+ {
685
+ "type": "text",
686
+ "bbox": [
687
+ 0.074,
688
+ 0.25,
689
+ 0.493,
690
+ 0.417
691
+ ],
692
+ "angle": 0,
693
+ "content": "In the simulation results, we present two performance metrics: the cumulative rewards for training the DQL and the number of successfully received packets for the inferring DQL. In the training, the reward signal received by gNB is given by the sum of the individual rewards of each source vehicle. The individual reward is equal either to (i) the upper-bounded achievable rate or to (ii) the upper bound. The event (i) happens when a packet is not yet delivered whereas the event (ii) happens when a packet is completely and successfully delivered. In the inference, the reward signal is simply given as the total number of successfully delivered packets."
694
+ },
695
+ {
696
+ "type": "image",
697
+ "bbox": [
698
+ 0.096,
699
+ 0.426,
700
+ 0.466,
701
+ 0.656
702
+ ],
703
+ "angle": 0,
704
+ "content": null
705
+ },
706
+ {
707
+ "type": "image_caption",
708
+ "bbox": [
709
+ 0.206,
710
+ 0.664,
711
+ 0.361,
712
+ 0.678
713
+ ],
714
+ "angle": 0,
715
+ "content": "Fig. 3: Training rewards."
716
+ },
717
+ {
718
+ "type": "text",
719
+ "bbox": [
720
+ 0.074,
721
+ 0.689,
722
+ 0.493,
723
+ 0.869
724
+ ],
725
+ "angle": 0,
726
+ "content": "Fig. 3 illustrates the convergence of the proposed DQL algorithm versus training episodes. The figure shows the cumulative average rewards per episode where the average is taken over the last 200 episodes. It is clear that the average reward improves as the training episodes increase. This shows the effectiveness of the proposed algorithm. The training in DQL gradually converges starting from the episode number \\(\\approx 2700\\). Note that the convergence of the algorithm is not smooth and contains some fluctuations which is due mainly to the high mobility nature of the IoV environment. Based on Fig. 3, DQN is trained for 3000 episodes to provide some convergence guarantees."
727
+ },
728
+ {
729
+ "type": "text",
730
+ "bbox": [
731
+ 0.074,
732
+ 0.87,
733
+ 0.492,
734
+ 0.947
735
+ ],
736
+ "angle": 0,
737
+ "content": "In the next two figures, we present, as a performance metric, the reward obtained in the inference part of DQL, which is the number of successfully received packets. We show this performance metric as stacked bars where each bar is divided into two parts: the lower part indicates the number"
738
+ },
739
+ {
740
+ "type": "text",
741
+ "bbox": [
742
+ 0.504,
743
+ 0.069,
744
+ 0.923,
745
+ 0.101
746
+ ],
747
+ "angle": 0,
748
+ "content": "of successfully delivered slice 1 packets and the higher part indicates the number of successfully delivered slice 2 packets."
749
+ },
750
+ {
751
+ "type": "image",
752
+ "bbox": [
753
+ 0.52,
754
+ 0.113,
755
+ 0.905,
756
+ 0.372
757
+ ],
758
+ "angle": 0,
759
+ "content": null
760
+ },
761
+ {
762
+ "type": "image_caption",
763
+ "bbox": [
764
+ 0.612,
765
+ 0.375,
766
+ 0.816,
767
+ 0.4
768
+ ],
769
+ "angle": 0,
770
+ "content": "Fig. 4: Impact of safety message sizes"
771
+ },
772
+ {
773
+ "type": "text",
774
+ "bbox": [
775
+ 0.503,
776
+ 0.415,
777
+ 0.923,
778
+ 0.595
779
+ ],
780
+ "angle": 0,
781
+ "content": "Fig. 4 shows the performance of DQL against the benchmarks when varying the slice 2 packet sizes. We can see that DQL succeeds in delivering more packets without having the full and future CSI as in the benchmarks. For example, DQL can, on average, deliver successfully almost 9 packets. However, other benchmarks can only deliver, on average, almost 6 packets. NOMA-RP achieves the lowest performance as expected. Further, DQL achieves a higher number of successfully delivered slice 2 packets. This is particularly important in IoV communication as slice 2 packets are mainly safety packets and thus must have a higher priority of being delivered."
782
+ },
783
+ {
784
+ "type": "image",
785
+ "bbox": [
786
+ 0.522,
787
+ 0.607,
788
+ 0.903,
789
+ 0.865
790
+ ],
791
+ "angle": 0,
792
+ "content": null
793
+ },
794
+ {
795
+ "type": "image_caption",
796
+ "bbox": [
797
+ 0.612,
798
+ 0.868,
799
+ 0.816,
800
+ 0.894
801
+ ],
802
+ "angle": 0,
803
+ "content": "Fig. 5: Impact of safety message deadlines"
804
+ },
805
+ {
806
+ "type": "text",
807
+ "bbox": [
808
+ 0.505,
809
+ 0.911,
810
+ 0.923,
811
+ 0.943
812
+ ],
813
+ "angle": 0,
814
+ "content": "Fig. 5 shows the performance of DQL against the benchmarks when varying the slice 2 packets deadlines. DQL"
815
+ }
816
+ ],
817
+ [
818
+ {
819
+ "type": "page_number",
820
+ "bbox": [
821
+ 0.912,
822
+ 0.031,
823
+ 0.921,
824
+ 0.04
825
+ ],
826
+ "angle": 0,
827
+ "content": "7"
828
+ },
829
+ {
830
+ "type": "text",
831
+ "bbox": [
832
+ 0.074,
833
+ 0.07,
834
+ 0.493,
835
+ 0.159
836
+ ],
837
+ "angle": 0,
838
+ "content": "still achieves the best performance when the deadline of the safety packets increases. The gap between DQL and other benchmarks widens further as the deadline increases. We further notice that NOMA-RP has the worst performance for all algorithms which shows the need of a suitable power allocation method in IoVRA."
839
+ },
840
+ {
841
+ "type": "text",
842
+ "bbox": [
843
+ 0.076,
844
+ 0.16,
845
+ 0.494,
846
+ 0.418
847
+ ],
848
+ "angle": 0,
849
+ "content": "We notice from both Fig. 4 and Fig. 5 that there is an unfair allocation of resources between the packets of the two slices. This is mainly due to highly dynamic nature of the IoV network (e.g., vehicle positions, their speeds, etc.). For example, if a source vehicle is located close to a destination vehicle, then the quality of the wireless link between both vehicles will likely be good. Thus, gNB learns through DQL to equally likely transmit both packets. However, in the case where the source vehicle is located far away from the corresponding destination vehicle, the quality of the wireless link between both parties will probably be poor and thus, gNB will likely learn through DQL to transmit only slice 2 packets to guarantee a successful V2V communication (since slice 2 packets might not require a large number of RBs compared to slice 1 packets). It is thus important to study the fairness among different slices in such IoV network, which will be investigated in our future works."
850
+ },
851
+ {
852
+ "type": "title",
853
+ "bbox": [
854
+ 0.152,
855
+ 0.434,
856
+ 0.415,
857
+ 0.448
858
+ ],
859
+ "angle": 0,
860
+ "content": "IV. CONCLUSIONS AND FUTURE WORKS"
861
+ },
862
+ {
863
+ "type": "text",
864
+ "bbox": [
865
+ 0.074,
866
+ 0.453,
867
+ 0.495,
868
+ 0.664
869
+ ],
870
+ "angle": 0,
871
+ "content": "In this paper, we developed an online MEC-based scheme to solve the slice selection, coverage selection, resource block and non-orthogonal multiple access power allocation problem in the Internet of vehicles network. We modelled the problem as a single agent Markov decision process and developed a DQL algorithm. The proposed DQL algorithm is proven robust and effective against various system parameters including the high mobility characteristics of IoV networks. It also outperformed some baseline benchmark algorithms that are based on global and offline decisions. In future works, we will investigate a two-time scale DRL approach that decides for coverage and slice selection on a slower time scale. Further, we will study the fairness of multiple slices. Finally, we will extend our system model to include mmWave communications."
872
+ },
873
+ {
874
+ "type": "title",
875
+ "bbox": [
876
+ 0.207,
877
+ 0.682,
878
+ 0.362,
879
+ 0.695
880
+ ],
881
+ "angle": 0,
882
+ "content": "V. ACKNOWLEDGMENT"
883
+ },
884
+ {
885
+ "type": "text",
886
+ "bbox": [
887
+ 0.075,
888
+ 0.7,
889
+ 0.493,
890
+ 0.762
891
+ ],
892
+ "angle": 0,
893
+ "content": "The authors would like to thank the Natural Sciences and Engineering Research Council of Canada (NSERC) and the Fonds de recherche du Quebec - Nature et technologies (FRQNT), for the financial support of this research."
894
+ },
895
+ {
896
+ "type": "title",
897
+ "bbox": [
898
+ 0.241,
899
+ 0.778,
900
+ 0.327,
901
+ 0.79
902
+ ],
903
+ "angle": 0,
904
+ "content": "REFERENCES"
905
+ },
906
+ {
907
+ "type": "ref_text",
908
+ "bbox": [
909
+ 0.085,
910
+ 0.798,
911
+ 0.49,
912
+ 0.832
913
+ ],
914
+ "angle": 0,
915
+ "content": "[1] A. Triwinarko, I. Dayoub, and S. Cherkaoui, \"Phy layer enhancements for next generation v2x communication,\" *Vehicular Communications*, vol. 32, p. 100385, 2021."
916
+ },
917
+ {
918
+ "type": "ref_text",
919
+ "bbox": [
920
+ 0.085,
921
+ 0.833,
922
+ 0.49,
923
+ 0.855
924
+ ],
925
+ "angle": 0,
926
+ "content": "[2] A. Alalewi, I. Dayoub, and S. Cherkaoui, \"On 5g-v2x use cases and enabling technologies: a comprehensive survey,\" IEEE Access, 2021."
927
+ },
928
+ {
929
+ "type": "ref_text",
930
+ "bbox": [
931
+ 0.086,
932
+ 0.856,
933
+ 0.49,
934
+ 0.89
935
+ ],
936
+ "angle": 0,
937
+ "content": "[3] R. Soua, I. Turcanu, F. Adamsky, D. Führer, and T. Engel, \"Multi-Access Edge Computing for Vehicular Networks: A Position Paper,\" in Proc. IEEE Globecom Workshops (GC Wkshps), 2018, pp. 1-6."
938
+ },
939
+ {
940
+ "type": "ref_text",
941
+ "bbox": [
942
+ 0.086,
943
+ 0.89,
944
+ 0.492,
945
+ 0.924
946
+ ],
947
+ "angle": 0,
948
+ "content": "[4] Z. Mika and S. Cherkaoui, “Network slicing for vehicular communications: a multi-agent deep reinforcement learning approach,” Annals of Telecommunications, vol. 76, no. 9, pp. 665–683, 2021."
949
+ },
950
+ {
951
+ "type": "list",
952
+ "bbox": [
953
+ 0.085,
954
+ 0.798,
955
+ 0.492,
956
+ 0.924
957
+ ],
958
+ "angle": 0,
959
+ "content": null
960
+ },
961
+ {
962
+ "type": "ref_text",
963
+ "bbox": [
964
+ 0.515,
965
+ 0.071,
966
+ 0.922,
967
+ 0.117
968
+ ],
969
+ "angle": 0,
970
+ "content": "[5] C. Campolo, A. Molinaro, A. Iera, R. R. Fontes, and C. E. Rothenberg, \"Towards 5G Network Slicing for the V2X Ecosystem,\" in Proc. IEEE Conf. on Netw. Softwarization and Workshops (NetSoft), 2018, pp. 400-405."
971
+ },
972
+ {
973
+ "type": "ref_text",
974
+ "bbox": [
975
+ 0.514,
976
+ 0.118,
977
+ 0.922,
978
+ 0.173
979
+ ],
980
+ "angle": 0,
981
+ "content": "[6] H. Khan, P. Luoto, S. Samarakoon, M. Bennis, and M. Latva-Aho, \"Network Slicing for Vehicular Communication,\" Transactions on Emerging Telecommunications Technologies, p. e3652, e3652 ett.3652. [Online]. Available: https://onlinelibrary.wiley.com/doi/abs/10.1002/ett.3652"
982
+ },
983
+ {
984
+ "type": "ref_text",
985
+ "bbox": [
986
+ 0.514,
987
+ 0.174,
988
+ 0.921,
989
+ 0.208
990
+ ],
991
+ "angle": 0,
992
+ "content": "[7] M. Azizian, S. Cherkaoui, and A. S. Hafid, \"Vehicle software updates distribution with sdn and cloud computing,\" IEEE Communications Magazine, vol. 55, no. 8, pp. 74-79, 2017."
993
+ },
994
+ {
995
+ "type": "ref_text",
996
+ "bbox": [
997
+ 0.514,
998
+ 0.209,
999
+ 0.921,
1000
+ 0.242
1001
+ ],
1002
+ "angle": 0,
1003
+ "content": "[8] L. Liang, H. Ye, G. Yu, and G. Y. Li, “Deep-Learning-Based Wireless Resource Allocation With Application to Vehicular Networks,” Proc. IEEE, vol. 108, no. 2, pp. 341–356, 2020."
1004
+ },
1005
+ {
1006
+ "type": "ref_text",
1007
+ "bbox": [
1008
+ 0.514,
1009
+ 0.243,
1010
+ 0.921,
1011
+ 0.276
1012
+ ],
1013
+ "angle": 0,
1014
+ "content": "[9] A. Abouaomar, S. Cherkaoui, Z. Mlika, and A. Kobbane, \"Service function chaining in mec: A mean-field game and reinforcement learning approach,\" arXiv preprint arXiv:2105.04701, 2021."
1015
+ },
1016
+ {
1017
+ "type": "ref_text",
1018
+ "bbox": [
1019
+ 0.509,
1020
+ 0.277,
1021
+ 0.921,
1022
+ 0.322
1023
+ ],
1024
+ "angle": 0,
1025
+ "content": "[10] 3GPP, \"Study on NR Vehicle-to-Everything (V2X),\" 3rd Generation Partnership Project (3GPP), Technical Report (TR) 38.885, 03 2019, version 16.0.0. [Online]. Available: https://portal.3gpp.org/Desktopmodules/Specifications/SpecificationDetails.aspx?specifiedId=3497"
1026
+ },
1027
+ {
1028
+ "type": "ref_text",
1029
+ "bbox": [
1030
+ 0.509,
1031
+ 0.322,
1032
+ 0.921,
1033
+ 0.355
1034
+ ],
1035
+ "angle": 0,
1036
+ "content": "[11] B. Di, L. Song, Y. Li, and Z. Han, \"V2X Meets NOMA: Non-Orthogonal Multiple Access for 5G-Enabled Vehicular Networks,\" IEEE Wireless Commun., vol. 24, no. 6, pp. 14-21, 2017."
1037
+ },
1038
+ {
1039
+ "type": "ref_text",
1040
+ "bbox": [
1041
+ 0.509,
1042
+ 0.356,
1043
+ 0.921,
1044
+ 0.412
1045
+ ],
1046
+ "angle": 0,
1047
+ "content": "[12] 5GCAR, \"Final Design and Evaluation of the 5G V2X System Level Architecture and Security Framework,\" The 5G Infrastructure Public Private Partnership (5GPPP), Deliverable D4.2, 11 2019, version 1.1. [Online]. Available: https://ec.europa.eu/research/participants/documents/downloadPublic?documentIds=080166e5c9d36fbc&appId=PPGMS"
1048
+ },
1049
+ {
1050
+ "type": "ref_text",
1051
+ "bbox": [
1052
+ 0.509,
1053
+ 0.412,
1054
+ 0.92,
1055
+ 0.456
1056
+ ],
1057
+ "angle": 0,
1058
+ "content": "[13] S. A. Ashraf, R. Blasco, H. Do, G. Fodor, C. Zhang, and W. Sun, \"Supporting Vehicle-to-Everything Services by 5G New Radio Release-16 Systems,\" IEEE Commun. Standards Mag., vol. 4, no. 1, pp. 26-32, 2020."
1059
+ },
1060
+ {
1061
+ "type": "ref_text",
1062
+ "bbox": [
1063
+ 0.508,
1064
+ 0.457,
1065
+ 0.921,
1066
+ 0.491
1067
+ ],
1068
+ "angle": 0,
1069
+ "content": "[14] M. Azizian, S. Cherkoui, and A. S. Hafid, “A distributed d-hop cluster formation for vanet,” in 2016 IEEE wireless communications and networking conference. IEEE, 2016, pp. 1-6."
1070
+ },
1071
+ {
1072
+ "type": "ref_text",
1073
+ "bbox": [
1074
+ 0.509,
1075
+ 0.492,
1076
+ 0.92,
1077
+ 0.525
1078
+ ],
1079
+ "angle": 0,
1080
+ "content": "[15] ——, “A distributed cluster based transmission scheduling in vanet,” in 2016 IEEE international conference on communications (ICC). IEEE, 2016, pp. 1-6."
1081
+ },
1082
+ {
1083
+ "type": "ref_text",
1084
+ "bbox": [
1085
+ 0.509,
1086
+ 0.526,
1087
+ 0.921,
1088
+ 0.569
1089
+ ],
1090
+ "angle": 0,
1091
+ "content": "[16] ——, “Dcv: A distributed cluster formation for vanet based on end-to-end reactive mobility,” in 2016 International Wireless Communications and Mobile Computing Conference (IWCMC). IEEE, 2016, pp. 287–291."
1092
+ },
1093
+ {
1094
+ "type": "ref_text",
1095
+ "bbox": [
1096
+ 0.508,
1097
+ 0.57,
1098
+ 0.92,
1099
+ 0.593
1100
+ ],
1101
+ "angle": 0,
1102
+ "content": "[17] M. Azizian, S. Cherkaoui, and A. Hafid, \"An optimized flow allocation in vehicular cloud,\" IEEE Access, vol. 4, pp. 6766-6779, 2016."
1103
+ },
1104
+ {
1105
+ "type": "ref_text",
1106
+ "bbox": [
1107
+ 0.509,
1108
+ 0.594,
1109
+ 0.921,
1110
+ 0.627
1111
+ ],
1112
+ "angle": 0,
1113
+ "content": "[18] V. Mnih et al., \"Human-Level Control Through Deep Reinforcement Learning,\" Nature, vol. 518, no. 7540, pp. 529-533, 02 2015. [Online]. Available: http://dx.doi.org/10.1038/nature14236"
1114
+ },
1115
+ {
1116
+ "type": "ref_text",
1117
+ "bbox": [
1118
+ 0.509,
1119
+ 0.628,
1120
+ 0.92,
1121
+ 0.66
1122
+ ],
1123
+ "angle": 0,
1124
+ "content": "[19] Y. S. Nasir and D. Guo, \"Multi-Agent Deep Reinforcement Learning for Dynamic Power Allocation in Wireless Networks,\" IEEE J. Sel. Areas Commun., vol. 37, no. 10, pp. 2239-2250, 2019."
1125
+ },
1126
+ {
1127
+ "type": "ref_text",
1128
+ "bbox": [
1129
+ 0.509,
1130
+ 0.661,
1131
+ 0.921,
1132
+ 0.718
1133
+ ],
1134
+ "angle": 0,
1135
+ "content": "[20] 3GPP, \"Study on Evaluation Methodology of New Vehicle-to-Everything (V2X) Use Cases for LTE and NR,\" 3rd Generation Partnership Project (3GPP), Technical Report (TR) 37.885, 06 2019, version 15.3.0. [Online]. Available: https://portal.3gpp.org/desktopmodules/ Specifications/SpecificationDetails.aspx?specificationId=3209"
1136
+ },
1137
+ {
1138
+ "type": "ref_text",
1139
+ "bbox": [
1140
+ 0.509,
1141
+ 0.718,
1142
+ 0.921,
1143
+ 0.762
1144
+ ],
1145
+ "angle": 0,
1146
+ "content": "[21] M. J. Kochenderfer, C. Amato, G. Chowdhary, J. P. How, H. J. D. Reynolds, J. R. Thornton, P. A. Torres-Carrasquillo, N. K. Üre, and J. Vian, Decision Making Under Uncertainty: Theory and Application, 1st ed. The MIT Press, 2015."
1147
+ },
1148
+ {
1149
+ "type": "ref_text",
1150
+ "bbox": [
1151
+ 0.509,
1152
+ 0.763,
1153
+ 0.921,
1154
+ 0.797
1155
+ ],
1156
+ "angle": 0,
1157
+ "content": "[22] L. Liang, H. Ye, and G. Y. Li, \"Spectrum Sharing in Vehicular Networks Based on Multi-Agent Reinforcement Learning,\" IEEE J. Sel. Areas Commun., vol. 37, no. 10, pp. 2282-2292, 2019."
1158
+ },
1159
+ {
1160
+ "type": "ref_text",
1161
+ "bbox": [
1162
+ 0.509,
1163
+ 0.798,
1164
+ 0.921,
1165
+ 0.83
1166
+ ],
1167
+ "angle": 0,
1168
+ "content": "[23] L. Wang, H. Ye, L. Liang, and G. Y. Li, \"Learn to Compress CSI and Allocate Resources in Vehicular Networks,\" IEEE Trans. Commun., vol. 68, no. 6, pp. 3640-3653, 2020."
1169
+ },
1170
+ {
1171
+ "type": "ref_text",
1172
+ "bbox": [
1173
+ 0.509,
1174
+ 0.831,
1175
+ 0.921,
1176
+ 0.866
1177
+ ],
1178
+ "angle": 0,
1179
+ "content": "[24] M. Zeng, A. Yadav, O. A. Dobre, and H. V. Poor, \"Energy-Efficient Joint User-RB Association and Power Allocation for Uplink Hybrid NOMA-OMA,\" IEEE Internet Things J., vol. 6, no. 3, pp. 5119-5131, Jun. 2019."
1180
+ },
1181
+ {
1182
+ "type": "list",
1183
+ "bbox": [
1184
+ 0.508,
1185
+ 0.071,
1186
+ 0.922,
1187
+ 0.866
1188
+ ],
1189
+ "angle": 0,
1190
+ "content": null
1191
+ }
1192
+ ]
1193
+ ]
2201.11xxx/2201.11295/8a760769-4fd1-4824-9420-1bc0f6b26c9e_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:050c717b46e1da2d903d18d742638abf669c971383a9308d51ded83eb586c105
3
+ size 695966
2201.11xxx/2201.11295/full.md ADDED
@@ -0,0 +1,157 @@
1
+ # Network Slicing with MEC and Deep
2
+
3
+ # Reinforcement Learning for the Internet of Vehicles
4
+
5
+ Zoubeir Mlika, Member, IEEE, and Soumaya Cherkaoui, Senior Member, IEEE
6
+
7
+ Abstract—The interconnection of vehicles in the future fifth generation (5G) wireless ecosystem forms the so-called Internet of vehicles (IoV). IoV offers new kinds of applications requiring delay-sensitive, compute-intensive and bandwidth-hungry services. Mobile edge computing (MEC) and network slicing (NS) are two of the key enabler technologies in 5G networks that can be used to optimize the allocation of the network resources and guarantee the diverse requirements of IoV applications.
8
+
9
+ As traditional model-based optimization techniques generally end up with NP-hard and strongly non-convex and non-linear mathematical programming formulations, in this paper, we introduce a model-free approach based on deep reinforcement learning (DRL) to solve the resource allocation problem in a MEC-enabled IoV network based on network slicing. Furthermore, the solution uses non-orthogonal multiple access (NOMA) to better exploit the scarce channel resources. The considered problem jointly addresses the channel and power allocation, the slice selection and the vehicle selection (vehicle grouping). We model the problem as a single-agent Markov decision process. Then, we solve it using the well-known deep-Q-learning (DQL) algorithm. We show that our approach is robust and effective under different network conditions compared to benchmark solutions.
10
+
11
+ # I. INTRODUCTION
12
+
13
+ The Internet of vehicles (IoV) is an emerging concept that enhances the existing capabilities of vehicular communication by integrating with the Internet of things (IoT). IoV is a key use-case in the upcoming beyond fifth generation (5G) wireless networks [1, 2]. IoV creates diverse new applications with extremely diverse service requirements, including ultra-reliable and delay-sensitive, bandwidth-hungry as well as compute-intensive applications [3]. For example, accident reports require ultra-high reliability and extremely low latency, whereas high definition map sharing requires high bandwidth. An important open question in today's IoV networks is "how to support, using a unified air interface, future IoV services while guaranteeing their extremely diverse performance requirements?" Network slicing (NS) is a potential solution to this question [4-6]. NS is a tool that enables network operators to support virtualized end-to-end networks, following the principle of software-defined networking [7]. It mainly allows creating different logical networks on top of a common and programmable physical infrastructure. Another technology, namely mobile edge computing, better known as multi-access edge computing (MEC), is considered as an
14
+
15
+ Zoubeir Mlika and Soumaya Cherkaoui are with the research laboratory on intelligent engineering for communications and networking (INTERLAB), Faculty of Engineering, Department of Electrical and Computer Science Engineering, University of Sherbrooke, Sherbrooke J1K 2R1, Quebec, Canada, (e-mail: zoubeir.mlika@usherbrooke.ca, soumaya.cherkaoui@usherbrooke.ca).
16
+
17
+ important building block in the future IoV ecosystem. The joint implementation of NS and MEC is a key enabler for IoV networks. These two technologies can be used not only to guarantee the diverse requirements of IoV applications but also to deploy the diverse vehicular services at the appropriate locations [3].
18
+
19
+ Optimal resource allocation in IoV would go through traditional model-based optimization techniques. Due to the complex and highly dynamic nature of IoV, such a model-based approach is not very appealing. In fact, such an approach ends up with strongly non-convex optimization problems that are generally NP-hard [8]. Thus, a model-free machine learning approach is crucial.
20
+
21
+ Reinforcement learning (RL) is a useful technique in solving NP-hard optimization problems. It has been applied successfully to solve very hard problems in different research areas including wireless networks [9]. It is based on Markov decision process (MDP) modeling where agents learn to select the best actions through repeated interactions with an unknown environment by receiving numerical reward signals [8]. Deep RL (DRL) uses the strong ability of neural networks to generalize across enormous state spaces and reduce the complexity of a solution, thus improving the learning process.
22
+
23
+ In this paper, using DRL, we propose a new solution framework to solve the challenging problem of resource allocation in a MEC-enabled IoV network. More specifically, we focus on the in-coverage scenario of 5G-new radio (5G-NR) in which vehicles communicate with each other through a base station, e.g., a next-generation NodeB (gNB), that performs MEC-based tasks [10]. We focus on the broadcast communication technique. Due to the scarce spectrum resources, non-orthogonal multiple access (NOMA) is also used in our proposed framework. NOMA is a promising technique to increase the spectral efficiency in vehicular networks [11].
24
+
25
+ In more detail, the considered resource allocation problem, called IoV resource allocation (IoVRA), involves the allocation of four resources: the slice (deciding which packet to send), the coverage of the broadcast (deciding the range of the broadcast), the resource blocks (RBs), and the power. By carefully allocating these four resources, and by applying successive interference cancellation (SIC) at the corresponding destination vehicles, NOMA can help in boosting the capacity of the IoV network. The use of NOMA in broadcast communications is different from the usual uplink and downlink NOMA techniques, which is due to the broadcast nature of IoV networks, i.e., two source vehicles broadcast with two distinct transmission powers to the same group of destination vehicles.
26
+
27
+ ![](images/5c5e51abb5c4aecc5121cfa7f84630892678529f42bc73b026ac25cf2e54e9e6.jpg)
28
+ Fig. 1: Two network slices in an IoV-based MEC network.
29
+
30
+ Even though we propose a MEC-based IoV solution for the case of vehicle-to-vehicle (V2V) communications, our proposed system model is valid for vehicle-to-infrastructure (V2I) communications as well. Indeed, in V2I communications, a vehicle communicates with a gNB-type road side unit (RSU) or a user-type RSU through the cellular Uu or the sidelink (SL) connectivity [12]. For the case of user-type RSU communications, the coverage range selection decision will simply include the RSU. For the case of gNB-type RSU communications, the broadcast coverage range selection could be ignored and replaced by RSU association. Thus, our proposed solution framework is still valid for both V2V and V2I communications.
31
+
32
+ To the best of our knowledge, this is the first work that proposes a model-free DRL framework to solve IoVRA in MEC-enabled IoV networks based on broadcast, NS and NOMA. The contributions of our work are the following. We model IoVRA as a single agent MDP. Next, we propose a deep-Q-learning (DQL) algorithm to solve it. Finally, we show that our proposed DQL algorithm outperforms benchmark algorithms.
33
+
34
+ # A. Organization
35
+
36
+ The article is organized as follows. Section II presents the system model and the single agent MDP, and describes the proposed DQL algorithm. Section III presents benchmark algorithmic solutions and gives the simulation results. Finally, Section IV draws some conclusions and discusses interesting open research questions.
37
+
38
+ # II. PROPOSED DRL FOR INTERNET OF VEHICLES
39
+
40
+ # A. Internet of Vehicles Model
41
+
42
+ We consider an IoV network composed of a set of source vehicles that generate packets, and a set of destination vehicles that receive packets. All vehicles operate in the in-coverage scenario of 5G-NR [10] and thus they are covered by some gNB that performs edge computing. A source vehicle uses broadcast communications to transmit to a subset of the destination vehicles. Time is slotted into a set of slots. The total bandwidth is divided into a set of frequency slots. A resource block (RB) is given by the pair (frequency, slot).
43
+
44
+ The proposed system model supports several use cases, including advanced driving with trajectory sharing and extended sensors [13], and is valid for both V2V and V2I communications. To guarantee the quality of service requirements
45
+
46
+ of the different use cases, NS is used, which is an efficient solution in IoV networks [6]. It mainly creates logical networks on top of a common and programmable MEC-enabled IoV infrastructure. We create two network slices. The first slice (slice 1) is designed for non-safety applications such as video streaming. The second slice (slice 2) is designed for safety applications such as emergency warnings. An example of the MEC-enabled NS system model is given in Fig. 1, where vehicles communicate with gNBs that are connected to MEC servers. On top of this network infrastructure, two network slices are created to support IoV applications. Slice 1 is designated for high throughput or enhanced mobile broadband communication (eMBB) and slice 2 is designated for ultra-reliable and low latency communication (uRLLC).
47
+
48
+ Each source vehicle has two different packets, one for each slice, where slice 1's packet $(\mathsf{pkt}^n)$ requires high throughput whereas slice 2's packet $(\mathsf{pkt}^s)$ has stringent latency requirements. For any packet to be delivered successfully, the corresponding source vehicle requires a set of RBs such that the achievable data rates are above the minimum requirements. Packet $\mathsf{pkt}^n$ can be transmitted using any RBs from the frequency-slot resource pool with a carefully chosen transmission power per RB. However, $\mathsf{pkt}^s$, which has an arrival time and a deadline, can be transmitted on any frequency but only in slots between its arrival time and deadline, with a carefully chosen transmission power per RB. The wireless channel gain between two vehicles includes fast and slow fading.
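+
+ To make the two packet types concrete, they can be sketched as a small data structure; the field names below are ours (the paper does not define a data layout), and the slot-window check mirrors the arrival/deadline constraint just described:
+
+ ```python
+ from dataclasses import dataclass
+ from typing import Optional
+
+ @dataclass
+ class Packet:
+     """Illustrative per-slice packet; field names are assumptions."""
+     slice_id: int                        # 1 = throughput-driven, 2 = latency-driven
+     leftover_bits: int                   # bits still to be delivered
+     arrival_slot: Optional[int] = None   # slice 2 only
+     deadline_slot: Optional[int] = None  # slice 2 only
+
+ def schedulable(pkt: Packet, t: int) -> bool:
+     """A slice 1 packet may use any slot; a slice 2 packet only the
+     slots between its arrival time and its deadline."""
+     if pkt.slice_id == 1:
+         return True
+     return pkt.arrival_slot <= t <= pkt.deadline_slot
+ ```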
49
+
50
+ A source vehicle has to decide which packet to send, at what range to broadcast, which RBs to use, and what transmission powers to allocate. The broadcast range optimization is similar to classical vehicle clustering [14-17]. To improve the spectral efficiency of the IoV network, we use NOMA to superimpose the transmissions of the source vehicles transmitting to some destination vehicle, which uses SIC to decode the superimposed transmissions.
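+
+ The SIC step can be illustrated as follows: a destination decodes the superimposed signals from strongest to weakest received power, subtracting each decoded signal before moving on. The Shannon-rate formula and the perfect-cancellation assumption below are simplifications for illustration, not the paper's exact rate model:
+
+ ```python
+ import math
+
+ def sic_rates(received_powers, noise=1.0):
+     """Decode the strongest signal first, treating the not-yet-decoded
+     signals as interference, then cancel it and continue. Returns
+     per-source spectral efficiencies in bit/s/Hz."""
+     order = sorted(range(len(received_powers)),
+                    key=lambda i: received_powers[i], reverse=True)
+     remaining = sum(received_powers)
+     rates = {}
+     for i in order:
+         interference = remaining - received_powers[i]
+         sinr = received_powers[i] / (interference + noise)
+         rates[i] = math.log2(1.0 + sinr)
+         remaining -= received_powers[i]  # perfect cancellation assumed
+     return rates
+
+ # Two source vehicles superimposed on the same RB:
+ print(sic_rates([4.0, 1.0]))  # the weaker one ends up interference-free
+ ```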
51
+
52
+ # B. Proposed Deep-Q-Learning Algorithm
53
+
54
+ Vehicles operate in the coverage of a MEC-enabled gNB, which collects information about the vehicles and performs pilot estimation to obtain the channel statistics. Based on the obtained feedback information, gNB observes the IoV environment and makes decisions. It plays the role of the intelligent entity in a single agent MDP. With the help of DRL, gNB learns to efficiently solve the complicated IoVRA problem. Specifically, gNB implements the well-known DQL approach [18]. DQL has mainly two parts: training and inference. In training, gNB trains a deep-Q-network (DQN), whereas in inference, it takes actions according to its trained DQN. DQL is an improvement of the Q-learning (QL) algorithm, a tabular method that maintains a table of state-action values. QL explores the action space using an exploration policy, e.g., $\epsilon$-greedy. Despite the proven effectiveness of QL, it generally fails when the state and action spaces become large, as in IoVRA.
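+
+ For illustration, $\epsilon$-greedy exploration over a finite action set takes only a few lines; the annealing schedule shown reuses the values reported later in Section III (from 1 down to 0.02 over the first 80% of episodes) but is otherwise a sketch:
+
+ ```python
+ import random
+
+ def epsilon_greedy(q_values, epsilon):
+     """Explore with probability epsilon, otherwise act greedily
+     on the current Q estimates."""
+     if random.random() < epsilon:
+         return random.randrange(len(q_values))
+     return max(range(len(q_values)), key=lambda a: q_values[a])
+
+ def anneal(episode, n_episodes, eps_start=1.0, eps_end=0.02, frac=0.8):
+     """Linearly decay epsilon over the first `frac` of episodes, then hold."""
+     t = min(episode / (frac * n_episodes), 1.0)
+     return eps_start + t * (eps_end - eps_start)
+ ```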
55
+
56
+ DQL is a promising technique that is proposed to solve the curse of dimensionality in RL by approximating the Q action-value function using deep learning. One way to solve IoVRA is through multi-agent DRL by combining independent QL for
57
+
58
+ each agent. That is, each agent tries to learn its own policy based on its own observations and actions while treating all other agents as part of the environment. This degrades training because it creates a non-stationary environment that changes as the other agents take decisions. For this reason, a MEC-enabled IoV network facilitates the training in such a situation by modeling IoVRA as a single agent that performs the training at the edge of the IoV network. The system architecture of the proposed DQN approach is given in Fig. 2, in which the gNB and the MEC server interact with the IoV environment and take decisions accordingly.
59
+
60
+ Before describing DQL in detail, IoVRA is first modeled as a single agent MDP given by the quadruple: state space, action space, reward function and transition probability. The agent in this MDP is the gNB, which takes an action, receives a reward and moves to the next state based on its interaction with the unknown IoV environment. This interaction helps gNB gain more experience and improve its accumulated reward.
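+
+ The quadruple translates into the usual agent-environment loop. The toy environment below only makes the interface concrete; its dynamics and rewards are placeholders, since the real transition is induced by channel variation and mobility and can only be sampled:
+
+ ```python
+ import random
+
+ class ToyIoVEnv:
+     """Toy stand-in for the IoV environment (all internals are placeholders)."""
+     def __init__(self, horizon=20):
+         self.horizon = horizon  # finite number of slots per episode
+         self.t = 0
+
+     def reset(self):
+         self.t = 0
+         return self._observe()
+
+     def step(self, action):
+         # A real step applies a (coverage, slice, RB, power) decision;
+         # here the transition and reward are just random.
+         self.t += 1
+         return self._observe(), random.random(), self.t >= self.horizon
+
+     def _observe(self):
+         return [self.t, random.random()]
+
+ def run_episode(env, policy):
+     """Generic loop: observe, act, collect the reward signal."""
+     obs, done, total = env.reset(), False, 0.0
+     while not done:
+         obs, reward, done = env.step(policy(obs))
+         total += reward
+     return total
+
+ print(run_episode(ToyIoVEnv(), policy=lambda obs: 0))
+ ```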
61
+
62
+ 1) The State Space: At any slot, the state of the IoV environment is not directly known to gNB. Instead, gNB receives an observation from the IoV environment. In our model, an observation includes local channel state information (CSI) and the transmission behavior of the source vehicles. More precisely, an observation includes the large and small-scale fading values between vehicles. These values can be accurately estimated by the destination vehicles and fed back to gNB without significant delay [19]. The observation also includes a decision variable that indicates whether the source vehicles transmitted in previous slots and, if so, which packet they transmitted. The third observation element indicates the number of leftover bits that each source vehicle still needs to send (e.g., initially, the number of leftover bits corresponds to the packet sizes). The fourth observation element includes the arrival time and the deadline of slice 2 packets.
63
+ 2) The Action Space: IoVRA is solved in an online fashion where, at each slot, gNB makes a decision that includes (i) the broadcast coverage range selection, (ii) the slice selection, (iii) the RB allocation, and (iv) the power allocation. For (i), we define a discrete set of coverage distances (including zero). Thus, if gNB chooses a nonzero coverage distance, it broadcasts to all destination vehicles within the circle whose radius is the chosen distance; choosing zero means no broadcast. For (ii), we define a discrete set of packets (including the empty set) that indicates which packet gNB decides to transmit. At each slot, each source vehicle has three possible choices: it does not transmit, it transmits a slice 1 packet, or it transmits a slice 2 packet. For (iii), the RB allocation consists of choosing the frequency slot to be used in the current slot. For (iv), gNB carefully chooses the transmission power per RB. Note that continuous power allocation makes the implementation of DQL more complex; thus, to keep things simple, we use a discrete set of power levels. Finally, the action space of gNB is given by the Cartesian product of these four discrete sets (a small sketch after the PRR discussion below enumerates this product).
64
+ 3) The Reward Signal: We mainly focus on maximizing the packet reception ratio (PRR) [20] in IoV broadcast networks. PRR is defined as follows: for one packet and one source vehicle, the PRR is given by the percentage of vehicles with
65
+
66
+ ![](images/b03b659301edbca6f3a398d4ab61bbb85953f2912a1d5ac8981581c69bbdd50e.jpg)
67
+ Fig. 2: IoV-based DRL architecture.
68
+
69
+ successful reception among the total number of intended receivers. PRR directly relates to the number of successfully received packets. Therefore, our main goal is to maximize the latter.
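+ In 3GPP-style notation [20], the definition above can be formalized as (our notation):
+
+ $$\mathrm{PRR} = \frac{\text{number of vehicles that successfully receive the packet}}{\text{number of intended receivers within the broadcast coverage}}.$$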
70
+
71
+ The reward signal at any slot is the sum of the individual rewards of the source vehicles. Hence, the reward signal depends on whether each source vehicle has successfully transmitted its packet or not. Technically, since we aim to maximize the number of successfully received packets, we could set the reward to one once a packet is successfully delivered and to zero otherwise. However, this is a poor design since a zero individual reward carries no useful information for learning. Thus, we build the individual reward design as follows. When a packet is not successfully delivered, or its delivery is not yet complete, the individual reward is set to the normalized achievable rate between the corresponding vehicles, where the normalization upper-bounds the reward. When the packet is successfully delivered, the individual reward is set to the chosen upper bound. In the first case, upper-bounding the individual reward helps the gNB acquire useful information for future decisions, whereas in the second case, setting the individual reward to the upper bound teaches the gNB the best possible decisions to take in the future and helps maximize the number of successfully delivered packets. The achievable data rate is calculated from the signal-to-interference-plus-noise ratio (SINR) according to uplink NOMA. A minimal sketch of this reward design is given below.
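+ The sketch below follows our reading of the description above, not the authors' exact code; `R_MAX` and `RATE_NORM` are assumed constants.
+
+ ```python
+ import math
+
+ R_MAX = 1.0        # chosen upper bound on the individual reward
+ RATE_NORM = 50.0   # assumed normalization constant for the achievable rate
+
+ def individual_reward(sinr: float, delivered: bool) -> float:
+     """Reward of one source vehicle in one slot."""
+     if delivered:
+         return R_MAX                        # teaches gNB the best decisions
+     rate = math.log2(1.0 + sinr)            # achievable rate from the SINR
+     return min(R_MAX, rate / RATE_NORM)     # normalized and upper-bounded
+
+ def reward_signal(per_vehicle):
+     # Overall reward = sum of the individual rewards of the source vehicles.
+     return sum(individual_reward(s, d) for s, d in per_vehicle)
+ ```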
72
+
73
+ The goal of DQL is to maximize the cumulative reward over the long run, given some initial state of the IoV environment. This cumulative reward is the sum, over many time steps, of rewards weighted by powers of a constant called the discount factor; the closer the discount factor is to one, the more weight future rewards receive, making them more important to the gNB agent. In the IoVRA problem, since the proposed MDP model consists of episodes of finite length, i.e., each episode lasts a finite number of slots, IoVRA belongs to the finite-horizon set of problems [21]. Further, since we aim to maximize the number of successfully delivered packets, the MEC-based gNB agent can simply choose the discount factor to be one, or a number close to one, in order to accumulate higher rewards and thus a higher number of successfully delivered packets.
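+ In standard finite-horizon RL notation (our addition, consistent with the description above), the cumulative reward over an episode of $T$ slots is
+
+ $$G = \sum_{t=0}^{T-1} \gamma^{t} r_{t}, \qquad \gamma \in (0, 1],$$
+
+ where $r_t$ is the reward signal at slot $t$ and $\gamma$ is the discount factor; with $\gamma \approx 1$ and the reward design above, maximizing $G$ closely tracks maximizing the number of successfully delivered packets.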
74
+
75
+ 4) The Transition Probability: The probability of moving to the next state while being in a given state and taking some action depends on the highly dynamic IoV environment and cannot be explicitly calculated. Transitions are driven by the variation of the channel coefficients and by vehicle mobility.
76
+ 5) Training in DQL: The DQL algorithm is composed of two parts: training and inference. The training consists of several episodes, where each episode spans a fixed number of slots. DQL uses DNNs to approximate the Q function. We leverage DQL with prioritized replay memory and dueling networks. In general, experience replay memory helps the agent remember and reuse past experiences. Standard replay memory samples experience transitions uniformly, without paying attention to their significance.
77
+
78
+ Prioritized experience replay memory is proposed to pay more attention to important experiences, which indeed improves learning. Also, dueling is proposed as a neural network architecture that represents two estimators for the Q function: one for the state-value function and one for the action-advantage function.
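+ A simplified proportional-prioritization sketch in Python (our reading of prioritized replay; the paper does not spell out its exact variant):
+
+ ```python
+ import random
+
+ class PrioritizedReplay:
+     """Replay memory that samples transitions proportionally to priority."""
+
+     def __init__(self, capacity, alpha=0.6):
+         self.capacity, self.alpha = capacity, alpha
+         self.buffer, self.priorities = [], []
+
+     def add(self, transition, priority=1.0):
+         # transition = (state, action, reward, next_state)
+         if len(self.buffer) >= self.capacity:
+             self.buffer.pop(0)
+             self.priorities.pop(0)
+         self.buffer.append(transition)
+         self.priorities.append(priority ** self.alpha)
+
+     def sample(self, batch_size):
+         idx = random.choices(range(len(self.buffer)),
+                              weights=self.priorities, k=batch_size)
+         return idx, [self.buffer[i] for i in idx]
+
+     def update(self, idx, losses):
+         # Priorities are updated proportionally to the training loss.
+         for i, loss in zip(idx, losses):
+             self.priorities[i] = (abs(loss) + 1e-6) ** self.alpha
+ ```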
79
+
80
+ In detail, the training lasts a number of episodes and requires as input the IoV environment, which includes the vehicles, the channel coefficients, the packet requirements, the available RBs, and any other relevant IoV network parameter. It returns as output the trained DQN. The first step in DQL is to start the simulator, which generates the vehicles and all network parameters, and then to initialize the DQN hyperparameters. At the beginning of the first slot, the initial state of the IoV environment (initial distances of the vehicles, etc.) is revealed to the gNB. Next, DQL iterates over the episodes. For each episode, the environment is built by (i) updating the network parameters, e.g., the leftover bits of each source vehicle are updated based on the previous episodes, and (ii) moving the vehicles according to the mobility model. Next, the exploration rate $\epsilon$ is annealed based on the episode index. Annealing the exploration rate over time is a technique used in RL to resolve the dilemma between exploration and exploitation: as time goes by, we decrease $\epsilon$ to increase the exploitation probability as the agent starts to learn something useful. After a number of episodes, the value of $\epsilon$ is no longer decreased. Then, the gNB chooses for each source vehicle an action that is a tuple of the coverage distance, the packet, the frequency slot, and the power level. Once the gNB agent chooses its action according to the annealed $\epsilon$, it calculates the reward signal. Specifically, a destination vehicle calculates the received SINR, finds the number of bits a source vehicle is transmitting, and communicates this information to the gNB using feedback channels. The environment moves to the next state, and the gNB adds the new experience to its prioritized replay memory, i.e., the obtained tuple (state, action, reward, next state) is assigned a priority. Initially, the gNB assigns random priorities to its experiences, but the priorities change as it starts to learn and updates its DQN parameters. The gNB samples a mini-batch from its prioritized replay memory according to the experience priorities; this mini-batch forms a dataset used to train the DQN. The gNB uses a variant of the well-known stochastic gradient descent to minimize the loss, and it updates the priorities of the sampled experiences proportionally to the value of the loss. Finally, the trained DQN is periodically copied into the target DQN. The main control flow is sketched below.
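+ The control flow above can be condensed into the following skeleton (structure only; `env`, `dqn`, `replay`, and `target_dqn` are placeholder objects, and the target-sync period is our assumption):
+
+ ```python
+ # Exploration schedule and training-loop skeleton for the described DQL.
+ EPS_START, EPS_END, ANNEAL_FRAC, EPISODES = 1.0, 0.02, 0.8, 3000
+ TARGET_SYNC = 100   # assumed period (in episodes) for the target-DQN copy
+
+ def epsilon(episode):
+     # Anneal linearly over the first 80% of episodes, then hold constant.
+     frac = min(1.0, episode / (ANNEAL_FRAC * EPISODES))
+     return EPS_START + frac * (EPS_END - EPS_START)
+
+ # for ep in range(EPISODES):
+ #     env.rebuild()                     # move vehicles, update leftover bits
+ #     for t in range(T):
+ #         a = explore_or_exploit(dqn, epsilon(ep))   # epsilon-greedy action
+ #         obs, r = env.step(a)          # SINR feedback -> reward signal
+ #         replay.add((prev_obs, a, r, obs))
+ #         train_minibatch(dqn, replay)  # SGD variant + priority updates
+ #     if ep % TARGET_SYNC == 0:
+ #         target_dqn.load(dqn)          # periodic target-network copy
+ ```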
81
+
82
+ 6) Implementing DQL: The inference of DQL is as follows (see Fig. 2). First, the trained DQN is loaded. Also, the annealed $\epsilon$ is loaded from the last training episode (the index of that episode is also revealed). Then, for each episode (which represents a new random channel realization), the environment is reset and rebuilt by initializing the network parameters and the transmission behaviors of each agent. Next, for each slot, the gNB agent, after observing the environment, chooses the best action according to its trained DQN, following feedback communication between itself and the destination vehicles. Then, the reward signal is obtained, and the next episode starts with a new random channel realization.
83
+
84
+ The inference in DQL is done in an online fashion. That is, it is executed in each slot without knowing the future
85
+
86
+ observations. The training in DQL is the most computationally intensive task. It is executed for a large number of episodes and can be done in an offline manner with different channel conditions and IoV network topologies. Note that training in DQL needs to be re-executed only when the topology of the IoV network undergoes significant changes, depending on the IoV network dynamics.
87
+
88
+ # III. PERFORMANCE EVALUATION
89
+
90
+ In this section, we validate the proposed DQL method. The simulation setup is based on the highway scenario of [20], and most simulation parameters are taken from [22, 23]. We consider a six-lane highway with a total length of $2\mathrm{km}$, where each lane has a width of $4\mathrm{m}$. There are three lanes for the forward direction (vehicles move from right to left) and three lanes for the backward direction. The source and destination vehicles are generated according to a spatial Poisson process. The vehicle speed determines the vehicle density: the average inter-vehicle distance (in the same lane) is $2.5\mathrm{s} \times \nu$, where $\nu$ is the vehicle's absolute speed. The speed of a vehicle depends on its lane: the $i$th forward lane (from top to bottom, with $i \in \{1, 2, 3\}$) has a speed of $60 + 20(i - 1)\ \mathrm{km/h}$, whereas the $i$th backward lane (from top to bottom, with $i \in \{1, 2, 3\}$) has a speed of $100 - 20(i - 1)\ \mathrm{km/h}$. The numbers of source vehicles $m$ and destination vehicles $n$ are randomly chosen. The important simulation parameters are as follows [22, 23]: the carrier frequency is $2\mathrm{GHz}$, the per-RB bandwidth is $1\mathrm{MHz}$, the vehicle antenna height is $1.5\mathrm{m}$, the vehicle antenna gain is $3\mathrm{dBi}$, the vehicle receiver noise figure is $9\mathrm{dB}$, the shadowing distribution is log-normal with a standard deviation of $3\mathrm{dB}$, the fast fading is Rayleigh, the path-loss model is the LOS model in WINNER+ B1, and the noise power $N_0$ is $-114\mathrm{dBm}$.
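+ For concreteness, the per-lane speeds and the resulting average inter-vehicle spacings implied by these parameters can be reproduced as follows (a small sanity-check script, not from the paper):
+
+ ```python
+ # Per-lane speed and 2.5 s-headway spacing for the six-lane highway model.
+ for i in (1, 2, 3):
+     fwd = 60 + 20 * (i - 1)    # forward lanes: 60, 80, 100 km/h
+     bwd = 100 - 20 * (i - 1)   # backward lanes: 100, 80, 60 km/h
+     for v_kmh in (fwd, bwd):
+         spacing_m = 2.5 * v_kmh / 3.6   # headway (s) times speed (m/s)
+         print(f"lane {i}: {v_kmh} km/h -> {spacing_m:.1f} m spacing")
+ ```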
91
+
92
+ Unless specified otherwise, the slice 1 packet size is randomly chosen in $[0.1, 1]$ Mb. The slice 2 packet size is 600 bytes. The gNB chooses a coverage (in m) from the set $\{0, 100, 400, 1000, 1400\}$. The power levels (in dBm) are given by $\{-100, 15, 23, 30\}$, where $-100$ dBm indicates no transmission. We set $m = 3$, $n = 4$, $F = 2$, and $T = 20$; each slot has a duration of $5\mathrm{ms}$. The DQN is trained in the Julia programming language using Flux.jl. The DQN consists of an input layer, an output layer, and three fully connected hidden layers containing 256, 128, and 120 neurons, respectively. The ReLU activation function is used in each layer. The ADAM optimizer with a learning rate of $10^{-5}$ is used. The training lasts 3000 episodes, with an exploration rate starting from 1 and annealed to reach 0.02 over the first $80\%$ of the episodes.
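+ The paper's implementation uses Julia with Flux.jl; for illustration only, an equivalent network in PyTorch would look as follows (the input and output sizes are assumptions, since they depend on the observation encoding and the action-space size):
+
+ ```python
+ import torch.nn as nn
+
+ OBS_DIM, N_ACTIONS = 64, 120   # placeholder dimensions, not from the paper
+
+ # Three fully connected hidden layers with 256, 128, and 120 neurons;
+ # the output layer is left linear in this sketch, as is common for Q-networks.
+ dqn = nn.Sequential(
+     nn.Linear(OBS_DIM, 256), nn.ReLU(),
+     nn.Linear(256, 128), nn.ReLU(),
+     nn.Linear(128, 120), nn.ReLU(),
+     nn.Linear(120, N_ACTIONS),
+ )
+ # optimizer = torch.optim.Adam(dqn.parameters(), lr=1e-5)
+ ```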
93
+
94
+ To the best of our knowledge, there are no existing research works that solve IoVRA while jointly considering the slice selection, the broadcast coverage selection, the RB allocation, and the power allocation. We implement three benchmarks: two based on NOMA and one based on OMA. All benchmarks partially borrow the idea of the swap-matching algorithm from [24]. All benchmarks are centralized at the edge and offline. They are called OMA-MP, NOMA-MP, and NOMA-RP. In OMA-MP, every RB is used by at most one
95
+
96
+ vehicle and the maximum transmission power is allocated. In NOMA-MP and NOMA-RP, every RB can be shared, and the maximum transmission power or a random transmission power is allocated, respectively. The coverage and slice selections are decided randomly at the beginning of each slot. The allocation of the RBs to the vehicles is done similarly in all benchmarks, as sketched below. First, an initial RB allocation is computed that gives the highest sum of channel power gains between each source vehicle and its destination vehicle. Once the initial allocation is obtained, swap matching is performed to improve the number of packets successfully received. If no swap improves the matching, the algorithm terminates.
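+ A compact Python sketch of this benchmark procedure (our paraphrase of the swap-matching idea from [24]; the OMA one-vehicle-per-RB constraint is omitted for brevity):
+
+ ```python
+ def swap_matching(gain, objective):
+     """gain[v][rb]: channel power gain of vehicle v on resource block rb;
+     objective(assign): number of successfully received packets."""
+     n_veh, n_rb = len(gain), len(gain[0])
+     # Initial allocation: each vehicle takes its highest-gain RB.
+     assign = [max(range(n_rb), key=lambda rb: gain[v][rb])
+               for v in range(n_veh)]
+     improved = True
+     while improved:                      # terminate when no swap improves
+         improved = False
+         for a in range(n_veh):
+             for b in range(a + 1, n_veh):
+                 swapped = assign[:]
+                 swapped[a], swapped[b] = swapped[b], swapped[a]
+                 if objective(swapped) > objective(assign):
+                     assign, improved = swapped, True
+     return assign
+ ```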
97
+
98
+ In the simulation results, we present two performance metrics: the cumulative reward for training DQL and the number of successfully received packets for DQL inference. In the training, the reward signal received by the gNB is given by the sum of the individual rewards of the source vehicles. The individual reward is equal either to (i) the upper-bounded normalized achievable rate or to (ii) the upper bound itself. Case (i) applies when a packet is not yet delivered, whereas case (ii) applies when a packet is completely and successfully delivered. In the inference, the reward signal is simply the total number of successfully delivered packets.
99
+
100
+ ![](images/4a584faf949e627ad0d602280645013c4048e5ef0d009b1c2c52d2a70cffaeba.jpg)
101
+ Fig. 3: Training rewards.
102
+
103
+ Fig. 3 illustrates the convergence of the proposed DQL algorithm versus the training episodes. The figure shows the cumulative average reward per episode, where the average is taken over the last 200 episodes. It is clear that the average reward improves as the training episodes increase, which shows the effectiveness of the proposed algorithm. The training in DQL gradually converges starting from episode $\approx 2700$. Note that the convergence of the algorithm is not smooth and contains some fluctuations, which is mainly due to the highly mobile nature of the IoV environment. Based on Fig. 3, the DQN is trained for 3000 episodes to provide some convergence guarantees.
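+ The smoothing used for this curve is a simple trailing average (our reading of "average over the last 200 episodes"):
+
+ ```python
+ def trailing_average(rewards, window=200):
+     """Average each episode's reward over the preceding `window` episodes."""
+     out = []
+     for i in range(len(rewards)):
+         chunk = rewards[max(0, i - window + 1): i + 1]
+         out.append(sum(chunk) / len(chunk))
+     return out
+ ```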
104
+
105
+ In the next two figures, we present, as a performance metric, the reward obtained in the inference part of DQL, which is the number of successfully received packets. We show this performance metric as stacked bars, where each bar is divided into two parts: the lower part indicates the number
106
+
107
+ of successfully delivered slice 1 packets and the upper part indicates the number of successfully delivered slice 2 packets.
108
+
109
+ ![](images/be8dbc8dea61666a9a56204e6d7fa6441378d4f53a78d6acadef9af92e45ad59.jpg)
110
+ Fig. 4: Impact of safety message sizes.
111
+
112
+ Fig. 4 shows the performance of DQL against the benchmarks when varying the slice 2 packet sizes. We can see that DQL succeeds in delivering more packets without requiring the full and future CSI used by the benchmarks. For example, DQL can, on average, successfully deliver almost 9 packets, whereas the benchmarks can only deliver, on average, almost 6 packets. NOMA-RP achieves the lowest performance, as expected. Further, DQL achieves a higher number of successfully delivered slice 2 packets. This is particularly important in IoV communication, as slice 2 packets are mainly safety packets and thus must have a higher priority of being delivered.
113
+
114
+ ![](images/f88b3b69828cb5b565fc164001ae017e0d188451f7e3f4d6d499786576fe2207.jpg)
115
+ Fig. 5: Impact of safety message deadlines.
116
+
117
+ Fig. 5 shows the performance of DQL against the benchmarks when varying the slice 2 packet deadlines. DQL
118
+
119
+ still achieves the best performance as the deadline of the safety packets increases. The gap between DQL and the other benchmarks widens further as the deadline increases. We further notice that NOMA-RP has the worst performance among all algorithms, which shows the need for a suitable power allocation method in IoVRA.
120
+
121
+ We notice from both Fig. 4 and Fig. 5 that there is an unfair allocation of resources between the packets of the two slices. This is mainly due to the highly dynamic nature of the IoV network (e.g., vehicle positions, speeds, etc.). For example, if a source vehicle is located close to a destination vehicle, the quality of the wireless link between the two vehicles will likely be good, so the gNB learns through DQL to transmit both packet types with similar likelihood. However, when the source vehicle is located far away from the corresponding destination vehicle, the quality of the wireless link will probably be poor, and the gNB will likely learn through DQL to transmit only slice 2 packets to guarantee a successful V2V communication (since slice 2 packets may require fewer RBs than slice 1 packets). It is thus important to study the fairness among different slices in such an IoV network, which we will investigate in future work.
122
+
123
+ # IV. CONCLUSIONS AND FUTURE WORKS
124
+
125
+ In this paper, we developed an online MEC-based scheme to solve the slice selection, coverage selection, resource block, and non-orthogonal multiple access power allocation problem in the Internet-of-vehicles network. We modelled the problem as a single-agent Markov decision process and developed a DQL algorithm. The proposed DQL algorithm is shown to be robust and effective against various system parameters, including the high-mobility characteristics of IoV networks. It also outperformed baseline benchmark algorithms that rely on global and offline decisions. In future work, we will investigate a two-time-scale DRL approach that decides the coverage and slice selection on a slower time scale. Further, we will study the fairness of multiple slices. Finally, we will extend our system model to include mmWave communications.
126
+
127
+ # V. ACKNOWLEDGMENT
128
+
129
+ The authors would like to thank the Natural Sciences and Engineering Research Council of Canada (NSERC) and the Fonds de recherche du Québec - Nature et technologies (FRQNT) for the financial support of this research.
130
+
131
+ # REFERENCES
132
+
133
+ [1] A. Triwinarko, I. Dayoub, and S. Cherkaoui, "PHY layer enhancements for next generation V2X communication," *Vehicular Communications*, vol. 32, p. 100385, 2021.
134
+ [2] A. Alalewi, I. Dayoub, and S. Cherkaoui, "On 5G-V2X use cases and enabling technologies: A comprehensive survey," IEEE Access, 2021.
135
+ [3] R. Soua, I. Turcanu, F. Adamsky, D. Führer, and T. Engel, "Multi-Access Edge Computing for Vehicular Networks: A Position Paper," in Proc. IEEE Globecom Workshops (GC Wkshps), 2018, pp. 1-6.
136
+ [4] Z. Mlika and S. Cherkaoui, “Network slicing for vehicular communications: a multi-agent deep reinforcement learning approach,” Annals of Telecommunications, vol. 76, no. 9, pp. 665–683, 2021.
137
+
138
+ [5] C. Campolo, A. Molinaro, A. Iera, R. R. Fontes, and C. E. Rothenberg, "Towards 5G Network Slicing for the V2X Ecosystem," in Proc. IEEE Conf. on Netw. Softwarization and Workshops (NetSoft), 2018, pp. 400-405.
139
+ [6] H. Khan, P. Luoto, S. Samarakoon, M. Bennis, and M. Latva-Aho, "Network Slicing for Vehicular Communication," Transactions on Emerging Telecommunications Technologies, p. e3652. [Online]. Available: https://onlinelibrary.wiley.com/doi/abs/10.1002/ett.3652
140
+ [7] M. Azizian, S. Cherkaoui, and A. S. Hafid, "Vehicle software updates distribution with SDN and cloud computing," IEEE Communications Magazine, vol. 55, no. 8, pp. 74-79, 2017.
141
+ [8] L. Liang, H. Ye, G. Yu, and G. Y. Li, “Deep-Learning-Based Wireless Resource Allocation With Application to Vehicular Networks,” Proc. IEEE, vol. 108, no. 2, pp. 341–356, 2020.
142
+ [9] A. Abouaomar, S. Cherkaoui, Z. Mlika, and A. Kobbane, "Service function chaining in MEC: A mean-field game and reinforcement learning approach," arXiv preprint arXiv:2105.04701, 2021.
143
+ [10] 3GPP, "Study on NR Vehicle-to-Everything (V2X)," 3rd Generation Partnership Project (3GPP), Technical Report (TR) 38.885, 03 2019, version 16.0.0. [Online]. Available: https://portal.3gpp.org/desktopmodules/Specifications/SpecificationDetails.aspx?specificationId=3497
144
+ [11] B. Di, L. Song, Y. Li, and Z. Han, "V2X Meets NOMA: Non-Orthogonal Multiple Access for 5G-Enabled Vehicular Networks," IEEE Wireless Commun., vol. 24, no. 6, pp. 14-21, 2017.
145
+ [12] 5GCAR, "Final Design and Evaluation of the 5G V2X System Level Architecture and Security Framework," The 5G Infrastructure Public Private Partnership (5GPPP), Deliverable D4.2, 11 2019, version 1.1. [Online]. Available: https://ec.europa.eu/research/participants/documents/downloadPublic?documentIds=080166e5c9d36fbc&appId=PPGMS
146
+ [13] S. A. Ashraf, R. Blasco, H. Do, G. Fodor, C. Zhang, and W. Sun, "Supporting Vehicle-to-Everything Services by 5G New Radio Release-16 Systems," IEEE Commun. Standards Mag., vol. 4, no. 1, pp. 26-32, 2020.
147
+ [14] M. Azizian, S. Cherkaoui, and A. S. Hafid, “A distributed d-hop cluster formation for VANET,” in 2016 IEEE Wireless Communications and Networking Conference. IEEE, 2016, pp. 1-6.
148
+ [15] ——, “A distributed cluster based transmission scheduling in VANET,” in 2016 IEEE International Conference on Communications (ICC). IEEE, 2016, pp. 1-6.
149
+ [16] ——, “DCV: A distributed cluster formation for VANET based on end-to-end reactive mobility,” in 2016 International Wireless Communications and Mobile Computing Conference (IWCMC). IEEE, 2016, pp. 287–291.
150
+ [17] M. Azizian, S. Cherkaoui, and A. Hafid, "An optimized flow allocation in vehicular cloud," IEEE Access, vol. 4, pp. 6766-6779, 2016.
151
+ [18] V. Mnih et al., "Human-Level Control Through Deep Reinforcement Learning," Nature, vol. 518, no. 7540, pp. 529-533, 02 2015. [Online]. Available: http://dx.doi.org/10.1038/nature14236
152
+ [19] Y. S. Nasir and D. Guo, "Multi-Agent Deep Reinforcement Learning for Dynamic Power Allocation in Wireless Networks," IEEE J. Sel. Areas Commun., vol. 37, no. 10, pp. 2239-2250, 2019.
153
+ [20] 3GPP, "Study on Evaluation Methodology of New Vehicle-to-Everything (V2X) Use Cases for LTE and NR," 3rd Generation Partnership Project (3GPP), Technical Report (TR) 37.885, 06 2019, version 15.3.0. [Online]. Available: https://portal.3gpp.org/desktopmodules/Specifications/SpecificationDetails.aspx?specificationId=3209
154
+ [21] M. J. Kochenderfer, C. Amato, G. Chowdhary, J. P. How, H. J. D. Reynolds, J. R. Thornton, P. A. Torres-Carrasquillo, N. K. Üre, and J. Vian, Decision Making Under Uncertainty: Theory and Application, 1st ed. The MIT Press, 2015.
155
+ [22] L. Liang, H. Ye, and G. Y. Li, "Spectrum Sharing in Vehicular Networks Based on Multi-Agent Reinforcement Learning," IEEE J. Sel. Areas Commun., vol. 37, no. 10, pp. 2282-2292, 2019.
156
+ [23] L. Wang, H. Ye, L. Liang, and G. Y. Li, "Learn to Compress CSI and Allocate Resources in Vehicular Networks," IEEE Trans. Commun., vol. 68, no. 6, pp. 3640-3653, 2020.
157
+ [24] M. Zeng, A. Yadav, O. A. Dobre, and H. V. Poor, "Energy-Efficient Joint User-RB Association and Power Allocation for Uplink Hybrid NOMA-OMA," IEEE Internet Things J., vol. 6, no. 3, pp. 5119-5131, Jun. 2019.
2201.11xxx/2201.11295/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae6699ac85154b07ef47edfa3ddb610bf4e2dc0e2bdd9441fd2310304522dc27
3
+ size 318864
2201.11xxx/2201.11295/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11302/441c1cbe-30a6-4111-898d-1d3614bc6617_content_list.json ADDED
@@ -0,0 +1,1007 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Wireless Energy Transfer in RIS-Aided",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 181,
8
+ 73,
9
+ 816,
10
+ 108
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Cell-Free Massive MIMO Systems:",
17
+ "text_level": 1,
18
+ "bbox": [
19
+ 214,
20
+ 126,
21
+ 782,
22
+ 159
23
+ ],
24
+ "page_idx": 0
25
+ },
26
+ {
27
+ "type": "text",
28
+ "text": "Opportunities and Challenges",
29
+ "text_level": 1,
30
+ "bbox": [
31
+ 261,
32
+ 179,
33
+ 736,
34
+ 213
35
+ ],
36
+ "page_idx": 0
37
+ },
38
+ {
39
+ "type": "text",
40
+ "text": "Enyu Shi, Jiayi Zhang, Senior Member, IEEE, Shuaifei Chen, Jiakang Zheng, Yan Zhang,",
41
+ "bbox": [
42
+ 130,
43
+ 234,
44
+ 864,
45
+ 289
46
+ ],
47
+ "page_idx": 0
48
+ },
49
+ {
50
+ "type": "text",
51
+ "text": "Derrick Wing Kwan Ng, Fellow, IEEE, and Bo Ai, Fellow, IEEE",
52
+ "bbox": [
53
+ 191,
54
+ 300,
55
+ 805,
56
+ 320
57
+ ],
58
+ "page_idx": 0
59
+ },
60
+ {
61
+ "type": "text",
62
+ "text": "Abstract",
63
+ "text_level": 1,
64
+ "bbox": [
65
+ 465,
66
+ 369,
67
+ 529,
68
+ 383
69
+ ],
70
+ "page_idx": 0
71
+ },
72
+ {
73
+ "type": "text",
74
+ "text": "In future sixth-generation (6G) mobile networks, the Internet-of-Everything (IoE) is expected to provide extremely massive connectivity for small battery-powered devices. Indeed, massive devices with limited energy storage capacity impose persistent energy demand hindering the lifetime of communication networks. As a remedy, wireless energy transfer (WET) is a key technology to address these critical energy supply issues. On the other hand, cell-free (CF) massive multiple-input multiple-output (MIMO) systems offer an efficient network architecture to realize the roll-out of the IoE. In this article, we first propose the paradigm of reconfigurable intelligent surface (RIS)-aided CF massive MIMO systems for WET, including its potential application scenarios and system architecture. The four-stage transmission procedure is discussed and analyzed to illustrate the practicality of the architecture. Then we put forward and analyze the hardware design of RIS. Particularly, we discuss the three corresponding operating modes and the amalgamation of WET technology and RIS-aided CF massive MIMO. Representative simulation results are given to confirm the superior performance achieved by our proposed schemes. Also, we investigate the optimal location of deploying multiple RISs to achieve the best system performance. Finally, several important research directions of RIS-aided CF massive MIMO systems with WET are presented to inspire further potential investigation.",
75
+ "bbox": [
76
+ 150,
77
+ 404,
78
+ 846,
79
+ 737
80
+ ],
81
+ "page_idx": 0
82
+ },
83
+ {
84
+ "type": "list",
85
+ "sub_type": "text",
86
+ "list_items": [
87
+ "E. Shi, J. Zhang, S. Chen, J. Zheng, and Y. Zhang are with the School of Electronic and Information Engineering, Beijing Jiaotong University, Beijing 100044, P. R. China. (e-mail: {jiayizhang} @bjtu.edu.cn).",
88
+ "B. Ai is with the State Key Laboratory of Rail Traffic Control and Safety, Beijing Jiaotong University, Beijing 100044, China. (e-mail: boai@bjtu.edu.cn).",
89
+ "D. W. K. Ng is with the School of Electrical Engineering and Telecommunications, University of New South Wales, NSW 2052, Australia. (e-mail: w.k.ng@unsw.edu.au)."
90
+ ],
91
+ "bbox": [
92
+ 111,
93
+ 767,
94
+ 882,
95
+ 880
96
+ ],
97
+ "page_idx": 0
98
+ },
99
+ {
100
+ "type": "aside_text",
101
+ "text": "arXiv:2201.11302v2 [cs.IT] 28 Jan 2022",
102
+ "bbox": [
103
+ 22,
104
+ 253,
105
+ 57,
106
+ 681
107
+ ],
108
+ "page_idx": 0
109
+ },
110
+ {
111
+ "type": "page_number",
112
+ "text": "1",
113
+ "bbox": [
114
+ 872,
115
+ 37,
116
+ 880,
117
+ 46
118
+ ],
119
+ "page_idx": 0
120
+ },
121
+ {
122
+ "type": "text",
123
+ "text": "I. INTRODUCTION",
124
+ "text_level": 1,
125
+ "bbox": [
126
+ 418,
127
+ 82,
128
+ 578,
129
+ 99
130
+ ],
131
+ "page_idx": 1
132
+ },
133
+ {
134
+ "type": "text",
135
+ "text": "The fifth-generation (5G) wireless network has targeted a 1000-fold increase in network capacity offering ubiquitous wireless connection for at least 100 billion devices worldwide, compared with the previous generations of networks. Recently, with the large-scale commercialization of the fifth-generation (5G) worldwide, the global industry has begun initial research on the next-generation mobile communication technology, i.e., the sixth-generation (6G). One of the key performance indicators for 6G is its extremely massive connectivity for small devices to enable the so-called Internet-of-Everything (IoE) [1]. In practice, most of these IoE devices will be either battery-powered or battery-less due to the associated high-cost of applying conventional power-grid-based solutions. Unfortunately, the use of limited battery power shortens the lifetime of networks degrading the quality of service. Although frequent battery replacement offers an intermediate solution to this problem, a large number of devices in IoE would further lead to exceedingly high labor and material costs. Therefore, advanced energy replenishing solutions are urgently needed to improve the energy supply challenges of future networks.",
136
+ "bbox": [
137
+ 109,
138
+ 112,
139
+ 883,
140
+ 446
141
+ ],
142
+ "page_idx": 1
143
+ },
144
+ {
145
+ "type": "text",
146
+ "text": "Wireless energy transfer (WET) has been proposed to address various practical scenarios where adopting electrical grid is not possible, such as unmanned aerial vehicle (UAV) communications, wireless sensor networks with sensors embedded in challenging environment structures, or inside a human body [2]. By exploiting the far-field radiation properties of electromagnetic (EM) waves, the radio frequency (RF) energy signal radiated by the transmitter can be harvested at the receiver which converts it into electrical energy for future use. However, WET technologies face various technical problems such as large path loss attenuation, challenging energy beam alignment, and inefficient resource allocation. Hence, to fully unlock the potential of WET, it must be combined with other advanced communication technologies and architectures to fully unlock the potential of practical IoE networks.",
147
+ "bbox": [
148
+ 109,
149
+ 454,
150
+ 885,
151
+ 709
152
+ ],
153
+ "page_idx": 1
154
+ },
155
+ {
156
+ "type": "text",
157
+ "text": "The cellular concept has been introduced over half a century for handling a small number of users in a large area. Recently, cell-free (CF) massive MIMO systems have been proposed [3], which advocates the removal of cellular boundaries for supporting the massive number of users. In practice, this paradigm can effectively shorten the distance between communication devices and access points (APs) through distributed deployment, thereby improving communication performance. Specifically, all the APs are connected to a central processing unit (CPU) with high-speed fronthaul links. Different from conventional centralized massive MIMO systems,",
158
+ "bbox": [
159
+ 109,
160
+ 717,
161
+ 883,
162
+ 893
163
+ ],
164
+ "page_idx": 1
165
+ },
166
+ {
167
+ "type": "page_number",
168
+ "text": "2",
169
+ "bbox": [
170
+ 870,
171
+ 36,
172
+ 883,
173
+ 47
174
+ ],
175
+ "page_idx": 1
176
+ },
177
+ {
178
+ "type": "image",
179
+ "img_path": "images/a7dc43ae540c062166af605ce19b227126de2b7bf996f24dd9214c3878983126.jpg",
180
+ "image_caption": [
181
+ "Fig. 1: Application scenarios of RIS-aided CF massive MIMO systems with WET."
182
+ ],
183
+ "image_footnote": [],
184
+ "bbox": [
185
+ 168,
186
+ 84,
187
+ 831,
188
+ 308
189
+ ],
190
+ "page_idx": 2
191
+ },
192
+ {
193
+ "type": "text",
194
+ "text": "APs are deployed in a certain range in a distributed manner and cooperate among themselves which offers rich spatial diversity to improve the system spectral efficiency (SE). On the other hand, the use of high-frequency bands, e.g., terahertz (THz), is expected for 6G networks to cope with the aggressive needs required in massive access [4]. Indeed, by further considering the path loss in high-frequency bands, super-dense APs have to be deployed for reducing communication distances and for ensuring line-of-sight (LoS) between APs and IoE devices. In general, to support such large-scale multiple access networks, a large amount of energy would be radiated, while the increased interference imposed by super-dense APs has to be carefully managed and controlled. Therefore, it is imperative to study an innovative, spectrally, and energy-efficient, but low-cost 6G wireless network solution.",
195
+ "bbox": [
196
+ 109,
197
+ 393,
198
+ 883,
199
+ 648
200
+ ],
201
+ "page_idx": 2
202
+ },
203
+ {
204
+ "type": "text",
205
+ "text": "Recently, reconfigurable intelligent surface (RIS) has been proposed as a promising new technology for reconfiguring the wireless propagation environment through software-controlled signal reflection [5]–[7]. Specifically, RIS requires only low power consumption and low cost. Undoubtedly, RIS can address the shortcomings of CF architecture in future communications and these two technologies complement each other. On the other hand, although RIS was initially proposed as a passive component, the coordination of a large number of elements still requires a certain amount of electrical energy [7]. When there are a large number of RISs, configuring a physical link for each RIS would cause huge resource consumption. Therefore, WET technology is an excellent solution to replace the grid energy-based approach. The energy supply of RIS through WET technology is expected to realize passive deployment of RIS, reduce hardware",
206
+ "bbox": [
207
+ 109,
208
+ 657,
209
+ 883,
210
+ 912
211
+ ],
212
+ "page_idx": 2
213
+ },
214
+ {
215
+ "type": "page_number",
216
+ "text": "3",
217
+ "bbox": [
218
+ 872,
219
+ 37,
220
+ 880,
221
+ 46
222
+ ],
223
+ "page_idx": 2
224
+ },
225
+ {
226
+ "type": "text",
227
+ "text": "overhead, and improve the system energy efficiency. Despite its great potential, there are relatively little researches on RIS-aided CF massive MIMO systems at present. In fact, some authors have studied the system performance of a single-RIS system, or optimized communication problems in multiple RIS systems [8]. Also, the authors have studied the combination of RIS and WET and the corresponding optimization through advanced optimization [9]. The results unveiled the non-trivial trade-off between achieving RIS self-sustainability and the system sum-rate. Others have studied the system performance of a single RIS-aided CF massive MIMO systems under idealistic conditions such as with sufficient energy storage and Rayleigh fading channels [10]. Besides, the authors introduced a precoding framework for RIS-aided CF networks [11]. Nevertheless, there is a lack of thorough research on the study of RIS-aided CF massive MIMO systems and their applications with WET technology.",
228
+ "bbox": [
229
+ 109,
230
+ 82,
231
+ 883,
232
+ 364
233
+ ],
234
+ "page_idx": 3
235
+ },
236
+ {
237
+ "type": "text",
238
+ "text": "In this article, we try to answer the question: How to apply WET technology in RIS-aided CF massive MIMO systems? To fully exploit the RIS benefits and the WET technology, we design the system architecture applying the WET technology to RIS and discuss its potential future application scenarios. In addition, we design the transmission procedures of the considered system and analyze each procedure. Based on this, we design different hardware architectures of RIS with WET technology for different practical scenarios. Meanwhile, we propose and compare different operation modes of this system, which provide useful insights for the implication of RIS-aided CF massive MIMO systems. With the novel system architecture, we also discuss how to deploy RIS to achieve better system performance. Finally, we highlight potential research directions that deserve further study.",
239
+ "bbox": [
240
+ 109,
241
+ 369,
242
+ 885,
243
+ 625
244
+ ],
245
+ "page_idx": 3
246
+ },
247
+ {
248
+ "type": "text",
249
+ "text": "II. MAIN APPLICATION SCENARIOS AND SYSTEM ARCHITECTURE",
250
+ "text_level": 1,
251
+ "bbox": [
252
+ 215,
253
+ 654,
254
+ 774,
255
+ 670
256
+ ],
257
+ "page_idx": 3
258
+ },
259
+ {
260
+ "type": "text",
261
+ "text": "In this section, we introduce the main application scenarios of RIS-aided CF massive MIMO systems in future wireless networks and analyze the characteristics of different scenarios. Meanwhile, we provide a detailed introduction to the architecture of the considered system and the corresponding transmission procedure.",
262
+ "bbox": [
263
+ 109,
264
+ 684,
265
+ 883,
266
+ 781
267
+ ],
268
+ "page_idx": 3
269
+ },
270
+ {
271
+ "type": "text",
272
+ "text": "A. Main Application Scenarios",
273
+ "text_level": 1,
274
+ "bbox": [
275
+ 109,
276
+ 813,
277
+ 369,
278
+ 830
279
+ ],
280
+ "page_idx": 3
281
+ },
282
+ {
283
+ "type": "text",
284
+ "text": "In the current research, e.g., [6], [7] as a passive device, RIS can be flexibly deployed in communication systems for improving the coverage area and providing wireless services for dead spots. In future 6G network architecture, RIS should be combined with other technologies",
285
+ "bbox": [
286
+ 109,
287
+ 844,
288
+ 883,
289
+ 914
290
+ ],
291
+ "page_idx": 3
292
+ },
293
+ {
294
+ "type": "page_number",
295
+ "text": "4",
296
+ "bbox": [
297
+ 870,
298
+ 37,
299
+ 883,
300
+ 47
301
+ ],
302
+ "page_idx": 3
303
+ },
304
+ {
305
+ "type": "image",
306
+ "img_path": "images/5769aeb19d15bd1a24f70f854116f0eca3416e63d07421c57d86885c04b162fb.jpg",
307
+ "image_caption": [
308
+ "Fig. 2: System architecture of RIS-aided CF massive MIMO systems with WET."
309
+ ],
310
+ "image_footnote": [],
311
+ "bbox": [
312
+ 344,
313
+ 82,
314
+ 655,
315
+ 354
316
+ ],
317
+ "page_idx": 4
318
+ },
319
+ {
320
+ "type": "text",
321
+ "text": "to achieve better communication performance. Our proposed RIS-aided CF massive MIMO communication architecture serves as an excellent candidate for better exploiting the advantages of WET technology and realizing the vision of IoE.",
322
+ "bbox": [
323
+ 109,
324
+ 440,
325
+ 883,
326
+ 510
327
+ ],
328
+ "page_idx": 4
329
+ },
330
+ {
331
+ "type": "text",
332
+ "text": "The future wireless networks are expected to make full use of the low, medium, and high full-spectrum resources to achieve seamless global coverage of space, sky, and earth trinity, such that they can satisfy the stringent demand for establishing unlimited safe and reliable \"human-machine-object\" connections anytime and anywhere. Indeed, the success of this desired vision relies on the support of massive access required by the IoE, requiring higher transmission rates, lower delays, and higher reliability [12]. The main scenes include two categories, densely populated spaces and large-scale factories with densely deployed equipment. As shown in Fig. 1, crowded spaces include large indoor shopping malls, basketball courts, restaurants, stadiums, and so on. In contrast, the scene of a large-scale factory with densely deployed equipment is relatively static and the only main challenge is to facilitate energy harvesting at potential equipment. Note that in the factory scenario, CPU and AP are internal devices rather than external additional devices. The main feature of the former is that the mobility of personnel is relatively high and the wireless equipment in the latter has only limited mobility. For the former scenes, there is generally more uncertainty in wireless communication channels. For example, for the indoor scenario, under the original CF system architecture, we can deploy",
333
+ "bbox": [
334
+ 109,
335
+ 518,
336
+ 883,
337
+ 906
338
+ ],
339
+ "page_idx": 4
340
+ },
341
+ {
342
+ "type": "page_number",
343
+ "text": "5",
344
+ "bbox": [
345
+ 872,
346
+ 37,
347
+ 880,
348
+ 46
349
+ ],
350
+ "page_idx": 4
351
+ },
352
+ {
353
+ "type": "image",
354
+ "img_path": "images/ff669911f60226f4397ed8ecada4f6b96b06fdabe9ccc175dd8001762e985394.jpg",
355
+ "image_caption": [
356
+ "Fig. 3: Four-stage transmission procedure of RIS-aided CF massive MIMO systems with WET."
357
+ ],
358
+ "image_footnote": [],
359
+ "bbox": [
360
+ 312,
361
+ 82,
362
+ 674,
363
+ 354
364
+ ],
365
+ "page_idx": 5
366
+ },
367
+ {
368
+ "type": "text",
369
+ "text": "multiple RISs in some communication dead zones and improve the quality of communication by increasing the number of RISs. Since WET technology is exploited to supply the necessary energy to RISs, there is no need to deploy physical power lines for charging which is more flexible in practical implementation. Moreover, in the indoor environment, there is an upsurge in the demand for temporary communication such as large-scale activities in which the role of flexible RIS deployment is particularly prominent. As for outdoor environments such as the gymnasium, we can deploy an unmanned aerial vehicle (UAV) on-demand as a carrier of RIS to offer signal coverage and enhancement through intelligent design of the track. On the other hand, for the latter scenes that equipment is not mobile, we only need to deploy the RIS in a fixed location in advance to improve the system performance.",
370
+ "bbox": [
371
+ 111,
372
+ 444,
373
+ 883,
374
+ 700
375
+ ],
376
+ "page_idx": 5
377
+ },
378
+ {
379
+ "type": "text",
380
+ "text": "B. System Architecture",
381
+ "text_level": 1,
382
+ "bbox": [
383
+ 112,
384
+ 731,
385
+ 303,
386
+ 748
387
+ ],
388
+ "page_idx": 5
389
+ },
390
+ {
391
+ "type": "text",
392
+ "text": "As shown in Fig. 2, the system is based on the CF architecture with an \"additional\" RIS layer between the user equipments (UEs) and the APs. The existing CF massive MIMO system is a three-tier structure composed of the CPU, APs, and UEs. Our proposed system architecture realizes a 3.5-layer architecture, which adds a cascading link through RIS. When there is a direct path, the APs can receive the signal from the UEs via two uplink paths: the direct link and the aggregated link through the RIS. As such, we treat the channels through the RIS with",
393
+ "bbox": [
394
+ 111,
395
+ 762,
396
+ 885,
397
+ 912
398
+ ],
399
+ "page_idx": 5
400
+ },
401
+ {
402
+ "type": "page_number",
403
+ "text": "6",
404
+ "bbox": [
405
+ 872,
406
+ 37,
407
+ 883,
408
+ 47
409
+ ],
410
+ "page_idx": 5
411
+ },
412
+ {
413
+ "type": "text",
414
+ "text": "an additional 0.5 layer. In particular, when the direct path is blocked, this proposed structure can still guarantee stable user communication.",
415
+ "bbox": [
416
+ 109,
417
+ 82,
418
+ 883,
419
+ 128
420
+ ],
421
+ "page_idx": 6
422
+ },
423
+ {
424
+ "type": "text",
425
+ "text": "The CPU has an extremely high computational capability, which not only can receive or transmit a large amount of information but also process the large number of AP receiving data [13]. In contrast, the AP has limited computing resources which are equipped with a simple radio frequency antenna. In the considered system, the difference between the proposed RIS and the traditional one is that our proposed one allows RIS to be equipped with a wireless energy harvesting module, which replaces the original wired-power circuit. By adopting the WET technology, AP is considered as an energy source to charge the RIS module wirelessly to ensure the normal operation of RIS.",
426
+ "bbox": [
427
+ 109,
428
+ 135,
429
+ 883,
430
+ 338
431
+ ],
432
+ "page_idx": 6
433
+ },
434
+ {
435
+ "type": "text",
436
+ "text": "To realize this paradigm, we introduce the flow of the four-stage transmission procedure of the system in Fig. 3. Stage I: Downlink energy transmission: The CPU collects the information transmitted by the AP via the fronthaul and sends control commands to the AP after signal processing. Based on the received signal, AP decides whether to transmit wireless energy signals to the RIS for energy harvesting. Especially, when the stored energy level in the RIS exceeds a threshold value, the RIS controller transmits a feedback signal to the AP for terminating the energy transmission. Stage II: Data transmission: In the uplink, the UE first sends a pilot signal to the APs which reaches the APs directly or through the RISs cascaded channel. Then the APs receive the pilot signal and convey it to the CPU via dedicated fronthaul for channel estimation. Subsequently, the UE sends their uplink data and the APs receive the signal via a direct path as well as the reflected signal arriving through RISs beamforming. Besides, the APs send their received data to the CPU, which performs joint signal processing for data detection. Finally, the CPU obtains the uplink decoded signal based on the global channel estimation. Then the downlink data signal is generated by the downlink precoder and transmitted to the AP, which finally reaches the UEs through the RIS. Meanwhile, AP generates the signal to control the RIS for phases adaptation. Note that if the AP desires to realize dynamic control to RIS, it is necessary to modify the frame structure and to insert some control time slots. Correspondingly, deploying signal processing modules at RIS may be needed to respond to the control signals.",
437
+ "bbox": [
438
+ 109,
439
+ 345,
440
+ 885,
441
+ 811
442
+ ],
443
+ "page_idx": 6
444
+ },
445
+ {
446
+ "type": "text",
447
+ "text": "III. DEPLOYMENT DESIGN",
448
+ "text_level": 1,
449
+ "bbox": [
450
+ 380,
451
+ 838,
452
+ 612,
453
+ 854
454
+ ],
455
+ "page_idx": 6
456
+ },
457
+ {
458
+ "type": "text",
459
+ "text": "In this section, we propose a RIS hardware design scheme supported by the WET technology. On this basis, we explore different system operation modes and compare their advantages and",
460
+ "bbox": [
461
+ 109,
462
+ 869,
463
+ 883,
464
+ 914
465
+ ],
466
+ "page_idx": 6
467
+ },
468
+ {
469
+ "type": "page_number",
470
+ "text": "7",
471
+ "bbox": [
472
+ 870,
473
+ 36,
474
+ 883,
475
+ 47
476
+ ],
477
+ "page_idx": 6
478
+ },
479
+ {
480
+ "type": "text",
481
+ "text": "disadvantages. Finally, we discuss various practical scenarios on how to effectively determine the location of RIS deployments in practice.",
482
+ "bbox": [
483
+ 109,
484
+ 82,
485
+ 883,
486
+ 127
487
+ ],
488
+ "page_idx": 7
489
+ },
490
+ {
491
+ "type": "text",
492
+ "text": "A. Hardware Design",
493
+ "text_level": 1,
494
+ "bbox": [
495
+ 109,
496
+ 159,
497
+ 285,
498
+ 176
499
+ ],
500
+ "page_idx": 7
501
+ },
502
+ {
503
+ "type": "text",
504
+ "text": "The hardware implementation of RIS is based on the concept of \"metasurface\", each element of which is a programmable sub-wavelength structural unit composed of two-dimensional metamaterials [14]. In practice, the field-programmable gate array (FPGA) can be used as a controller to achieve flexible control of the RIS, which usually communicates and coordinates with other network components (e.g., BS, APs, and UEs) through dedicated links. Although FPGA consumes a small amount of energy, it still needs some electrical power source to support its operation. As shown in Fig. 4, we introduce the wireless energy scavenging module to the original RIS panel and exploit some elements for energy reception and other elements for signal reflection. The energy harvesting elements are connected with a piece of energy storage hardware (e.g., a rechargeable battery), which can store the harvested energy and support the energy consumption of other elements performing reflection.",
505
+ "bbox": [
506
+ 109,
507
+ 189,
508
+ 883,
509
+ 470
510
+ ],
511
+ "page_idx": 7
512
+ },
513
+ {
514
+ "type": "text",
515
+ "text": "RIS is generally assembled by hundreds of elements [6], so it is worth exploring which elements are selected for serving as energy receiving modules. Here, we have designed and compared three types of hardware structures in Fig. 4. The first type consists of a complete separation of the energy harvesting elements and the information reflecting elements which are easy to implement in hardware. Yet, when the RIS panel is large, there would be some energy reception dead spots in this design due to non-uniform energy flux created by impinging signals. Based on this, we further propose an improved block structure, i.e., deployment energy harvesting elements at the four corners of the RIS, i.e., type 2, which can alleviate the impacts of dead spots but introduce moderate hardware implementation difficulty. Finally, we also design a scattered structure that aims at reducing the impacts caused by dead zones for energy harvesting or signal reflection, but at the cost of higher hardware complexity. In practice, the energy consumption of the centralized and distributed element designs mainly depends on the number of elements used for information reflection. Indeed, in addition to considering the balance between the system performance and the implementation complexity, the ratio between the number of RIS elements in energy harvesting mode and that in reflecting mode is another key issue. This is determined by various factors, such as distance, energy conversion efficiency, and the channel environment.",
516
+ "bbox": [
517
+ 109,
518
+ 478,
519
+ 885,
520
+ 891
521
+ ],
522
+ "page_idx": 7
523
+ },
524
+ {
525
+ "type": "page_number",
526
+ "text": "8",
527
+ "bbox": [
528
+ 870,
529
+ 37,
530
+ 880,
531
+ 47
532
+ ],
533
+ "page_idx": 7
534
+ },
535
+ {
536
+ "type": "text",
537
+ "text": "In practice, the information and energy elements ratios can be adaptively adjusted according to the feedback of the actual parameters to realize dynamic assignment [9].",
538
+ "bbox": [
539
+ 111,
540
+ 82,
541
+ 883,
542
+ 127
543
+ ],
544
+ "page_idx": 8
545
+ },
546
+ {
547
+ "type": "image",
548
+ "img_path": "images/9021a1480b99404764ff42cd140d56dec3e29ced42efef799c0df0e5aeed7ef0.jpg",
549
+ "image_caption": [
550
+ "Fig. 4: Hardware designs for information and energy transmission in a RIS. Type 1: Energy harvesting and information reflecting elements are completely separated and distributed in blocks. Type 2: Block structure with incomplete separation of energy harvesting and information reflection elements. Type 3: Energy harvesting and information reflecting elements alternate with each other in a scattered distribution."
551
+ ],
552
+ "image_footnote": [],
553
+ "bbox": [
554
+ 202,
555
+ 152,
556
+ 364,
557
+ 296
558
+ ],
559
+ "page_idx": 8
560
+ },
561
+ {
562
+ "type": "image",
563
+ "img_path": "images/8a7507b0433af4224e4644d9e3959354d7b674144a547c4bc17d999cb37e8b1c.jpg",
564
+ "image_caption": [],
565
+ "image_footnote": [],
566
+ "bbox": [
567
+ 367,
568
+ 152,
569
+ 645,
570
+ 297
571
+ ],
572
+ "page_idx": 8
573
+ },
574
+ {
575
+ "type": "image",
576
+ "img_path": "images/44d1e634b3a52519b3d67fd6b6f4484e9385e8369f58a4ac4a077bfd580beb7b.jpg",
577
+ "image_caption": [],
578
+ "image_footnote": [],
579
+ "bbox": [
580
+ 671,
581
+ 152,
582
+ 797,
583
+ 297
584
+ ],
585
+ "page_idx": 8
586
+ },
587
+ {
588
+ "type": "text",
589
+ "text": "B. Operation Modes",
590
+ "text_level": 1,
591
+ "bbox": [
592
+ 112,
593
+ 493,
594
+ 284,
595
+ 508
596
+ ],
597
+ "page_idx": 8
598
+ },
599
+ {
600
+ "type": "text",
601
+ "text": "In practice, there are different ways for deploying RISs leading to different system operation modes. They affect the hardware design complexity and signal processing methods, as well as the synergy between individual communication devices, which inevitably brings different system performances. In this section, we discuss several different operation modes of RIS for WET and information reflection.",
602
+ "bbox": [
603
+ 109,
604
+ 523,
605
+ 883,
606
+ 645
607
+ ],
608
+ "page_idx": 8
609
+ },
610
+ {
611
+ "type": "text",
612
+ "text": "1) Centralized RIS: As shown in Fig. 6, we first consider that there is only a single large-scale centralized RIS with massive elements. It is configured with high computational power and signal processing capability at the RIS controller, which can reduce the processing burden of the AP. Meanwhile, the centralized RIS has high beamforming capability that can assist the APs to service massive UEs via excellent interference management [9]. Moreover, the centralized design facilitates highly efficient wireless energy focusing, while it is easier to deploy and maintain the equipment. However, due to the architecture of centralized RIS, the path loss of the long-distance devices can be severe as UEs are randomly deployed. Also, the ability of WET to keep such a sophisticated controller functioning properly still needs to be explored. Besides, the requirements for hardware design are also demanding resulting in high cost.",
613
+ "bbox": [
614
+ 111,
615
+ 655,
616
+ 883,
617
+ 910
618
+ ],
619
+ "page_idx": 8
620
+ },
621
+ {
622
+ "type": "page_number",
623
+ "text": "9",
624
+ "bbox": [
625
+ 872,
626
+ 37,
627
+ 883,
628
+ 47
629
+ ],
630
+ "page_idx": 8
631
+ },
632
+ {
633
+ "type": "text",
634
+ "text": "2) Non-cooperative Distributed RIS: In this case, we assume that multiple RISs have noncooperatively deployed in a service area. This distributed deployment is more flexible and can avoid dead spots in WET as much as possible. Meanwhile, the distance between RIS-AP and RIS-UE is shortened that reducing the path loss of WET. As such, significant improvements in energy efficiency and communication quality can be achieved. In practice, the hardware design of each RIS is still relatively simple since it only expects to implement simple signal reflection. Besides, since multiple non-cooperative RISs are distributed in the service area, the signals between different RISs may interfere with each other, which degrades the communication performance of UEs.",
635
+ "bbox": [
636
+ 109,
637
+ 82,
638
+ 883,
639
+ 310
640
+ ],
641
+ "page_idx": 9
642
+ },
643
+ {
644
+ "type": "text",
645
+ "text": "3) Cooperative Distributed RIS: Similar to the last operation mode, we consider that there are multiple RISs in a service area. The difference is that there is some intelligent cooperation among RISs through physical links or dedicated wireless communication as shown in Fig. 6. Note that when there is a physical link connected to the RIS, WET is regarded as supplementary electric energy to reduce the required energy consumption. This operation mode not only reduces the path loss of WET and information communication but also enables better beam management among multiple RISs through cooperation thus reducing interference. However, it also places additional requirements on the hardware design of RISs, such as how to implement physical connections between multiple RISs and the design of the controller. As such, RIS will generate additional energy consumption for channel estimation and cooperation. However, due to the stable characteristics of the channel, frequent communication between RISs is not required, so the required energy consumption is still reasonable. In practice, a high computational requirement at the CPU and advanced optimization algorithms are needed to achieve efficient and intelligent cooperation of RISs. In practice, centralized RIS is more suitable for small-scale networks with dense users. In contrast, distributed RIS has better performance in large-scale networks due to its inherent ability in exploiting spatial diversities.",
646
+ "bbox": [
647
+ 109,
648
+ 318,
649
+ 885,
650
+ 734
651
+ ],
652
+ "page_idx": 9
653
+ },
654
+ {
655
+ "type": "text",
656
+ "text": "In Fig. 5, we compare different RIS operation modes for different element numbers within an area of $1000\\mathrm{m}^2$ and consider the effect of phase errors. The achievable rate (bps/Hz) and the transmit power (dBm) are the two main performance metrics for information communication. It shows that RIS can significantly improve the system performance and compared with centralized RIS, distributed deployment can obtain higher system achievable rate. Moreover, phase errors reduce the performance of the system and the influence of phase errors is more significant with the increase of the RIS element numbers.",
657
+ "bbox": [
658
+ 109,
659
+ 738,
660
+ 883,
661
+ 914
662
+ ],
663
+ "page_idx": 9
664
+ },
665
+ {
666
+ "type": "page_number",
667
+ "text": "10",
668
+ "bbox": [
669
+ 867,
670
+ 36,
671
+ 883,
672
+ 46
673
+ ],
674
+ "page_idx": 9
675
+ },
676
+ {
677
+ "type": "image",
678
+ "img_path": "images/52618527df25fc75cfd7c2265d38666cda59f368ece562e7c3bf21bec17a3526.jpg",
679
+ "image_caption": [
680
+ "Fig. 5: Achievable rate of different RIS operation modes and elements with phase errors."
681
+ ],
682
+ "image_footnote": [],
683
+ "bbox": [
684
+ 359,
685
+ 92,
686
+ 635,
687
+ 263
688
+ ],
689
+ "page_idx": 10
690
+ },
691
+ {
692
+ "type": "image",
693
+ "img_path": "images/4b1b9c86b45b08aba41cf7f38830e7efa6bcd40fd039b72da9b30af6e694a057.jpg",
694
+ "image_caption": [
695
+ "Fig. 6: Comparison of different RIS operation modes."
696
+ ],
697
+ "image_footnote": [],
698
+ "bbox": [
699
+ 230,
700
+ 330,
701
+ 769,
702
+ 463
703
+ ],
704
+ "page_idx": 10
705
+ },
706
+ {
707
+ "type": "text",
708
+ "text": "C. RIS Deployment",
709
+ "text_level": 1,
710
+ "bbox": [
711
+ 112,
712
+ 550,
713
+ 277,
714
+ 566
715
+ ],
716
+ "page_idx": 10
717
+ },
718
+ {
719
+ "type": "text",
720
+ "text": "How to judiciously deploy multiple RISs in a hybrid wireless network containing passive RISs, active CPUs, and APs to optimize the system performance is a critical problem that needs to be solved. Previous studies have provided some solutions for RIS location deployment, such as deploying a RIS close to the UE or the AP side can obtain higher system performance [15]. Unfortunately, the considered system is not a single point-to-point communication and the RIS needs support from the WET technology. In the considered network, energy and information transmission are two equally important performance metrics.",
721
+ "bbox": [
722
+ 109,
723
+ 580,
724
+ 883,
725
+ 755
726
+ ],
727
+ "page_idx": 10
728
+ },
729
+ {
730
+ "type": "text",
731
+ "text": "First, from the perspective of optimizing the system communication performance, the RIS should be intuitively deployed in a location with a clear LoS from the AP to maximize its received signal power for passive beamforming and WET. Although this deployment scheme enjoys good performance for the case of a single UE, it does not work well when the number of UEs increases, due to the lack of enough non-LoS paths to achieve high-rank MIMO channels. To enhance the experience of UEs with poor communication quality, we should also deploy",
732
+ "bbox": [
733
+ 109,
734
+ 765,
735
+ 883,
736
+ 914
737
+ ],
738
+ "page_idx": 10
739
+ },
740
+ {
741
+ "type": "page_number",
742
+ "text": "11",
743
+ "bbox": [
744
+ 867,
745
+ 37,
746
+ 880,
747
+ 47
748
+ ],
749
+ "page_idx": 10
750
+ },
751
+ {
752
+ "type": "text",
753
+ "text": "some RISs in dedicated areas with weak signal coverage and communication dead zones caused by blockages. On the other hand, from the perspective of WET, RIS should be deployed in scenarios where it is difficult to install powerlines for emergencies, such as UAV communication and temporary placement sites. In practice, ensuring proper system operation first requires that the RIS can effectively receive wireless energy signals to support its operation. As such, a RIS should be reasonably deployed near an AP to facilitate the establishment of a LoS path.",
754
+ "bbox": [
755
+ 109,
756
+ 82,
757
+ 883,
758
+ 233
759
+ ],
760
+ "page_idx": 11
761
+ },
762
+ {
763
+ "type": "text",
764
+ "text": "In practice, the dynamic of propagation environment and user mobility lead to channel variations over time and each RIS may also be associated with multiple APs and UEs while multiple RISs may be interconnected. In such cases, heuristic solutions to the design of RISs deployment may be ineffective. In particular, it is generally intractable to acquire accurate globally channel state information (CSI) at a low cost in large-size systems. Therefore, how to determine a precise location deployment scheme of multiple RISs based on partial system information is a new problem with high practical significance. A promising approach to solve this problem is to utilize machine learning techniques, such as deep learning (DL). For example, in the training phase, we can empirically deploy multiple RISs at reasonable reference locations and collect key performance metrics, such as the received signal strength and the corresponding energy efficiency measured at different UE locations. The collected key metrics and the location of the RIS are then exploited to train the DL-based neural network as an input and an output, respectively. However, during the training process, it is generally not possible to change the locations of RISs and additional installation costs will be incurred. In fact, the obtained results serve as a performance upper bound for a fixed RIS deployment location. As for more practical implementation, one may consider statical long-term optimization. Besides, we can adopt convex optimization methods such as gradient descent method and Newton method to optimize RIS location and system power allocation to improve the system performance.",
765
+ "bbox": [
766
+ 109,
767
+ 239,
768
+ 885,
769
+ 708
770
+ ],
771
+ "page_idx": 11
772
+ },
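To make the training-phase idea in the paragraph above concrete, here is a hedged sketch that mimics the described pipeline: deploy RISs at reference locations, record key metrics per location, and fit a model that maps measured metrics to a recommended location. A linear least-squares fit stands in for the DL-based neural network, and `measure_metrics` is a synthetic placeholder for field measurements; none of the names or values come from the paper.

```python
# Hedged sketch of the data-driven deployment idea: metrics -> location.
import numpy as np

rng = np.random.default_rng(1)

# Candidate reference deployments: (x, y) locations of a single RIS.
candidate_locs = rng.uniform(0.0, 30.0, size=(50, 2))

def measure_metrics(loc):
    # Placeholder for field measurements (e.g., received signal strength and
    # energy efficiency at several UE positions) for a RIS deployed at `loc`.
    ap = np.array([0.0, 0.0])
    ues = np.array([[25.0, 5.0], [10.0, 20.0], [28.0, 28.0]])
    d = np.linalg.norm(ues - loc, axis=1) + np.linalg.norm(loc - ap)
    return 1.0 / d  # crude strength proxy, decaying with total path length

X = np.array([measure_metrics(l) for l in candidate_locs])  # inputs: metrics
Y = candidate_locs                                          # outputs: locations
W, *_ = np.linalg.lstsq(np.c_[X, np.ones(len(X))], Y, rcond=None)

# Inference: given metrics measured in a new environment, suggest a location.
new_metrics = measure_metrics(np.array([15.0, 15.0]))
print(np.r_[new_metrics, 1.0] @ W)
```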
773
+ {
774
+ "type": "text",
775
+ "text": "IV. FUTURE DIRECTIONS",
776
+ "text_level": 1,
777
+ "bbox": [
778
+ 387,
779
+ 732,
780
+ 606,
781
+ 750
782
+ ],
783
+ "page_idx": 11
784
+ },
785
+ {
786
+ "type": "text",
787
+ "text": "In addition to the system architecture and deployment schemes discussed above, there still are some other research directions worthy of future investigation for the systems.",
788
+ "bbox": [
789
+ 112,
790
+ 763,
791
+ 883,
792
+ 809
793
+ ],
794
+ "page_idx": 11
795
+ },
796
+ {
797
+ "type": "text",
798
+ "text": "A. Control Scheme",
799
+ "text_level": 1,
800
+ "bbox": [
801
+ 112,
802
+ 839,
803
+ 272,
804
+ 856
805
+ ],
806
+ "page_idx": 11
807
+ },
808
+ {
809
+ "type": "text",
810
+ "text": "For a single RIS, how to control the activation of energy harvesting and information reflection elements is an important problem to be solved with the following aspects. For instance, stringent",
811
+ "bbox": [
812
+ 111,
813
+ 869,
814
+ 883,
815
+ 914
816
+ ],
817
+ "page_idx": 11
818
+ },
819
+ {
820
+ "type": "page_number",
821
+ "text": "12",
822
+ "bbox": [
823
+ 867,
824
+ 36,
825
+ 880,
826
+ 47
827
+ ],
828
+ "page_idx": 11
829
+ },
830
+ {
831
+ "type": "text",
832
+ "text": "requirements are imposed for the hardware design of RIS while higher information receiving and processing capabilities are also required. Besides, the massive elements in RIS incur new challenges to the optimization algorithm of high-dimensional matrices [9]. As for the systems, the traditional analysis results in the literature do not consider the cooperation among RISs, which may lead to UE unfairness and communication quality degradation due to signal interference among multiple RISs. To fully utilize the advantage of the strong directivity of the RIS reflected beam, the RIS phase shift matrix is designed according to the AP and UE positions to achieve precise control of the beam direction, thereby avoiding interference as much as possible. For example, with a large number of APs and RISs, computationally-efficient distributed machine learning algorithms serve as appealing solutions as they have been successfully applied to address large-scale optimization problems. However, a rigorous control protocol must be designed to ensure orderly communication and harness the interference among devices.",
833
+ "bbox": [
834
+ 109,
835
+ 82,
836
+ 883,
837
+ 390
838
+ ],
839
+ "page_idx": 12
840
+ },
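As a toy illustration of the position-based phase-shift design mentioned above (steering the reflected beam according to AP and UE positions), the sketch below computes per-element phases that compensate the cascaded AP-element-UE path delay. The geometry, array size, and carrier frequency are illustrative assumptions, not values from the paper.

```python
# Hedged sketch: phase shifts that point the reflected beam at a known UE.
import numpy as np

c = 3e8
f = 3.5e9                      # assumed carrier frequency (Hz)
lam = c / f
spacing = lam / 2

# A 16-element uniform linear RIS along the x-axis at 5 m height.
elems = np.stack([np.arange(16) * spacing, np.zeros(16), np.full(16, 5.0)], axis=1)
ap = np.array([-20.0, 0.0, 10.0])
ue = np.array([15.0, 0.0, 1.5])

d_total = np.linalg.norm(elems - ap, axis=1) + np.linalg.norm(elems - ue, axis=1)
# Each element cancels its cascaded propagation phase (modulo 2*pi), so the
# per-path contributions add coherently at the UE position.
phases = np.mod(2 * np.pi * d_total / lam, 2 * np.pi)
print(np.round(phases, 3))
```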
841
+ {
842
+ "type": "text",
843
+ "text": "B. Resource Allocation",
844
+ "text_level": 1,
845
+ "bbox": [
846
+ 112,
847
+ 419,
848
+ 305,
849
+ 435
850
+ ],
851
+ "page_idx": 12
852
+ },
853
+ {
854
+ "type": "text",
855
+ "text": "Large and complex systems inevitably face the problem of resource allocation. In traditional CF systems, pilot assignment is an important research direction that is also inherent to our system. Besides, for the consideration of supporting WET technology, power allocation is another key issue worthy of attention. Specifically, we need to control the energy supply offered by APs to optimize the system energy efficiency. If all the APs adopt a uniform power allocation to supply energy to RISs, it is generally suboptimal which can cause energy waste. As such, certain optimization mechanisms are needed to maximize power utilization. For instance, for imperfect CSI, we can exploit the large-scale channel information and solve the related resource allocation problem through optimization algorithms such as geometric programming. In practice, formulating design optimization problems to realize accurate and efficient power allocation is an ideal solution. However, most of the practical problems are non-convex and intractable in complex networks. Hence, finding an optimal power control scheme within a reasonable timescale is an urgent future research direction. It is worth mentioning that using the WET technology to supply energy to UEs is also worth considering in resource allocation.",
856
+ "bbox": [
857
+ 109,
858
+ 449,
859
+ 888,
860
+ 811
861
+ ],
862
+ "page_idx": 12
863
+ },
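As a toy instance of the optimization-based power allocation discussed above, the sketch below computes a max-min fair downlink energy allocation across APs and compares it with the uniform allocation. In this simplified model the harvested energy is linear in transmit power, so a linear program suffices; the geometric-programming machinery the text mentions targets more realistic non-linear formulations. The link gains and power budget are assumptions.

```python
# Hedged sketch: max-min fair AP power allocation for charging RISs.
import numpy as np
from scipy.optimize import linprog

rng = np.random.default_rng(2)
n_aps, n_ris = 4, 3
G = rng.uniform(0.01, 0.1, size=(n_ris, n_aps))  # assumed energy-link gains
P_total = 10.0                                    # total AP power budget (W)

# Variables x = [p_1..p_A, t]; maximize t subject to G @ p >= t per RIS.
c = np.r_[np.zeros(n_aps), -1.0]                  # linprog minimizes, so -t
A_ub = np.vstack([np.c_[-G, np.ones(n_ris)],      # -G p + t <= 0
                  np.r_[np.ones(n_aps), 0.0]])    # sum(p) <= P_total
b_ub = np.r_[np.zeros(n_ris), P_total]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=[(0, None)] * (n_aps + 1))

p_opt = res.x[:n_aps]
uniform = np.full(n_aps, P_total / n_aps)
print("min harvested, uniform  :", (G @ uniform).min())
print("min harvested, optimized:", (G @ p_opt).min())
```

The optimized allocation never does worse than the uniform one on the worst-served RIS, which is the sense in which uniform power allocation "can cause energy waste" above.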
864
+ {
865
+ "type": "text",
866
+ "text": "C. Hardware",
867
+ "text_level": 1,
868
+ "bbox": [
869
+ 112,
870
+ 839,
871
+ 223,
872
+ 854
873
+ ],
874
+ "page_idx": 12
875
+ },
876
+ {
877
+ "type": "text",
878
+ "text": "To achieve ubiquitous deployment of RIS-aided CF massive MIMO systems, the cost and quality of hardware for receivers and transmitters as well as RIS naturally swing a non-trivial",
879
+ "bbox": [
880
+ 109,
881
+ 869,
882
+ 883,
883
+ 914
884
+ ],
885
+ "page_idx": 12
886
+ },
887
+ {
888
+ "type": "page_number",
889
+ "text": "13",
890
+ "bbox": [
891
+ 867,
892
+ 36,
893
+ 880,
894
+ 47
895
+ ],
896
+ "page_idx": 12
897
+ },
898
+ {
899
+ "type": "text",
900
+ "text": "trade-off in system design. Yet, to overcome the associated high cost, we typically utilize low-cost components at APs. Unfortunately, the use of low-cost hardware at RIS affects the hardware accuracy, including the limited phase shift resolution and the mutual coupling of phase and incidence angles in each RIS element. In practice, the non-linear amplitude and phase response of RIS would affect the accuracy of reflected signals, resulting in unsatisfactory system performance. As a result, a pragmatic system design taking into the potential hardware imperfect is necessary. Besides, the hardware design of energy reception introduces extra hardware complexity in fabricating RIS. Indeed, designing WET elements at the wavelength level is challenging. In addition, how to avoid the coupling interference between the WET elements and the reflecting elements is also a problem that needs to be considered. On the other hand, the fronthaul capacity limitation reduces the availability of converging control signals between the APs and the CPU. As such, the above hardware impairment problems would definitely degrade the communication and energy transmission performance of the system that are important issues for investigation.",
901
+ "bbox": [
902
+ 109,
903
+ 82,
904
+ 883,
905
+ 416
906
+ ],
907
+ "page_idx": 13
908
+ },
909
+ {
910
+ "type": "text",
911
+ "text": "V. CONCLUSIONS",
912
+ "text_level": 1,
913
+ "bbox": [
914
+ 419,
915
+ 439,
916
+ 575,
917
+ 455
918
+ ],
919
+ "page_idx": 13
920
+ },
921
+ {
922
+ "type": "text",
923
+ "text": "In this article, we investigate the promising RIS-aided CF massive MIMO system with WET technology for realizing IoE in future wireless networks. First, we discussed several potential application scenarios of the system and the proposed system architecture. Besides, we proposed different operation modes and shed light on the suitable RIS hardware design for WET. In addition, we investigated feasible solutions for the possible deployment of RISs in the system. Finally, to offer useful guidance for future research, we indicated the critical challenges and promising research directions for realizing RIS-aided CF massive MIMO systems with WET.",
924
+ "bbox": [
925
+ 109,
926
+ 469,
927
+ 883,
928
+ 645
929
+ ],
930
+ "page_idx": 13
931
+ },
932
+ {
933
+ "type": "text",
934
+ "text": "REFERENCES",
935
+ "text_level": 1,
936
+ "bbox": [
937
+ 441,
938
+ 669,
939
+ 553,
940
+ 684
941
+ ],
942
+ "page_idx": 13
943
+ },
944
+ {
945
+ "type": "list",
946
+ "sub_type": "ref_text",
947
+ "list_items": [
948
+ "[1] H. Tataria, M. Shafi, A. F. Molisch, M. Dohler, H. Sjoland, and F. Tufvesson, “6G wireless systems: Vision, requirements, challenges, insights, and opportunities,” Proc. IEEE, vol. 109, no. 7, pp. 1166–1199, Jul. 2021.",
949
+ "[2] J. Hu, Q. Wang, and K. Yang, “Energy self-sustainability in full-spectrum 6G,” IEEE Wireless Commun., vol. 28, no. 1, pp. 104–111, Jan. 2020.",
950
+ "[3] J. Zhang, E. Björnson, M. Matthaiou, D. W. K. Ng, H. Yang, and D. J. Love, “Prospective multiple antenna technologies for beyond 5G,” IEEE J. Sel. Areas Commun., vol. 38, no. 8, pp. 1637–1660, Aug. 2020.",
951
+ "[4] S. Chen, J. Zhang, E. Björnson, J. Zhang, and B. Ai, “Structured massive access for scalable cell-free massive MIMO systems,” IEEE J. Sel. Areas Commun., vol. 39, no. 4, pp. 1086–1100, Apr. 2020.",
952
+ "[5] W. Tang, M. Z. Chen, J. Y. Dai, Y. Zeng, X. Zhao, S. Jin, Q. Cheng, and T. J. Cui, \"Wireless communications with programmable metasurface: New paradigms, opportunities, and challenges on transceiver design,\" IEEE Wireless Commun., vol. 27, no. 2, pp. 180-187, Feb. 2020."
953
+ ],
954
+ "bbox": [
955
+ 119,
956
+ 700,
957
+ 883,
958
+ 912
959
+ ],
960
+ "page_idx": 13
961
+ },
962
+ {
963
+ "type": "page_number",
964
+ "text": "14",
965
+ "bbox": [
966
+ 867,
967
+ 37,
968
+ 880,
969
+ 46
970
+ ],
971
+ "page_idx": 13
972
+ },
973
+ {
974
+ "type": "list",
975
+ "sub_type": "ref_text",
976
+ "list_items": [
977
+ "[6] Q. Wu and R. Zhang, \"Towards smart and reconfigurable environment: Intelligent reflecting surface aided wireless network,\" IEEE Commun. Mag., vol. 58, no. 1, pp. 106-112, Jan. 2019.",
978
+ "[7] M. Di Renzo, A. Zappone, M. Debbah, M.-S. Alouini, C. Yuen, J. De Rosny, and S. Tretyakov, “Smart radio environments empowered by reconfigurable intelligent surfaces: How it works, state of research, and the road ahead,” IEEE J. Sel. Areas Commun., vol. 38, no. 11, pp. 2450–2525, Nov. 2020.",
979
+ "[8] Q. Wu and R. Zhang, \"Joint active and passive beamforming optimization for intelligent reflecting surface assisted SWIPT under QoS constraints,\" IEEE J. Sel. Areas Commun., vol. 38, no. 8, pp. 1735-1748, Aug. 2020.",
980
+ "[9] S. Hu, Z. Wei, Y. Cai, C. Liu, D. W. K. Ng, and J. Yuan, \"Robust and secure sum-rate maximization for multiuser MISO downlink systems with self-sustainable IRS,\" IEEE Trans. Commun., early access, 2021.",
981
+ "[10] T. Van Chien, H. Q. Ngo, S. Chatzinotas, M. Di Renzo, and B. Ottersten, \"Reconfigurable intelligent surface-assisted cell-free massive MIMO systems over spatially-correlated channels,\" arXiv preprint arXiv:2104.08648, 2021.",
982
+ "[11] Z. Zhang and L. Dai, \"A joint precoding framework for wideband reconfigurable intelligent surface-aided cell-free network,\" IEEE Trans. Signal Process., vol. 69, pp. 4085-4101, 2021.",
983
+ "[12] M. Giordani, M. Polese, M. Mezzavilla, S. Rangan, and M. Zorzi, “Toward 6G networks: Use cases and technologies,” IEEE Commun. Mag., vol. 58, no. 3, pp. 55–61, Mar. 2020.",
984
+ "[13] J. Zhang, J. Zhang, D. W. K. Ng, S. Jin, and B. Ai, \"Improving sum-rate of cell-free massive MIMO with expanded compute-and-forward,\" IEEE Trans. Signal Process., pp. 1-1, 2021.",
985
+ "[14] C. Liaskos, S. Nie, A. Tsioliaridou, A. Pitsillides, S. Ioannidis, and I. Akyildiz, “A new wireless communication paradigm through software-controlled metasurfaces,” IEEE Commun. Mag., vol. 56, no. 9, pp. 162–169, Sep. 2018.",
986
+ "[15] X. Liu, Y. Liu, Y. Chen, and H. V. Poor, “RIS enhanced massive non-orthogonal multiple access networks: Deployment and passive beamforming design,” IEEE J. Sel. Areas Commun., vol. 39, no. 4, pp. 1057–1071, Apr. 2020."
987
+ ],
988
+ "bbox": [
989
+ 112,
990
+ 83,
991
+ 883,
992
+ 496
993
+ ],
994
+ "page_idx": 14
995
+ },
996
+ {
997
+ "type": "page_number",
998
+ "text": "15",
999
+ "bbox": [
1000
+ 867,
1001
+ 37,
1002
+ 880,
1003
+ 46
1004
+ ],
1005
+ "page_idx": 14
1006
+ }
1007
+ ]
2201.11xxx/2201.11302/441c1cbe-30a6-4111-898d-1d3614bc6617_model.json ADDED
@@ -0,0 +1,1231 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.254,
8
+ 0.058,
9
+ 0.683
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2201.11302v2 [cs.IT] 28 Jan 2022"
13
+ },
14
+ {
15
+ "type": "page_number",
16
+ "bbox": [
17
+ 0.874,
18
+ 0.038,
19
+ 0.882,
20
+ 0.047
21
+ ],
22
+ "angle": 0,
23
+ "content": "1"
24
+ },
25
+ {
26
+ "type": "title",
27
+ "bbox": [
28
+ 0.182,
29
+ 0.074,
30
+ 0.817,
31
+ 0.109
32
+ ],
33
+ "angle": 0,
34
+ "content": "Wireless Energy Transfer in RIS-Aided"
35
+ },
36
+ {
37
+ "type": "title",
38
+ "bbox": [
39
+ 0.215,
40
+ 0.127,
41
+ 0.783,
42
+ 0.16
43
+ ],
44
+ "angle": 0,
45
+ "content": "Cell-Free Massive MIMO Systems:"
46
+ },
47
+ {
48
+ "type": "title",
49
+ "bbox": [
50
+ 0.262,
51
+ 0.18,
52
+ 0.737,
53
+ 0.214
54
+ ],
55
+ "angle": 0,
56
+ "content": "Opportunities and Challenges"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.132,
62
+ 0.236,
63
+ 0.865,
64
+ 0.29
65
+ ],
66
+ "angle": 0,
67
+ "content": "Enyu Shi, Jiayi Zhang, Senior Member, IEEE, Shuaifei Chen, Jiakang Zheng, Yan Zhang,"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.192,
73
+ 0.301,
74
+ 0.807,
75
+ 0.321
76
+ ],
77
+ "angle": 0,
78
+ "content": "Derrick Wing Kwan Ng, Fellow, IEEE, and Bo Ai, Fellow, IEEE"
79
+ },
80
+ {
81
+ "type": "title",
82
+ "bbox": [
83
+ 0.466,
84
+ 0.371,
85
+ 0.531,
86
+ 0.385
87
+ ],
88
+ "angle": 0,
89
+ "content": "Abstract"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.151,
95
+ 0.405,
96
+ 0.848,
97
+ 0.738
98
+ ],
99
+ "angle": 0,
100
+ "content": "In future sixth-generation (6G) mobile networks, the Internet-of-Everything (IoE) is expected to provide extremely massive connectivity for small battery-powered devices. Indeed, massive devices with limited energy storage capacity impose persistent energy demand hindering the lifetime of communication networks. As a remedy, wireless energy transfer (WET) is a key technology to address these critical energy supply issues. On the other hand, cell-free (CF) massive multiple-input multiple-output (MIMO) systems offer an efficient network architecture to realize the roll-out of the IoE. In this article, we first propose the paradigm of reconfigurable intelligent surface (RIS)-aided CF massive MIMO systems for WET, including its potential application scenarios and system architecture. The four-stage transmission procedure is discussed and analyzed to illustrate the practicality of the architecture. Then we put forward and analyze the hardware design of RIS. Particularly, we discuss the three corresponding operating modes and the amalgamation of WET technology and RIS-aided CF massive MIMO. Representative simulation results are given to confirm the superior performance achieved by our proposed schemes. Also, we investigate the optimal location of deploying multiple RISs to achieve the best system performance. Finally, several important research directions of RIS-aided CF massive MIMO systems with WET are presented to inspire further potential investigation."
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.112,
106
+ 0.768,
107
+ 0.883,
108
+ 0.802
109
+ ],
110
+ "angle": 0,
111
+ "content": "E. Shi, J. Zhang, S. Chen, J. Zheng, and Y. Zhang are with the School of Electronic and Information Engineering, Beijing Jiaotong University, Beijing 100044, P. R. China. (e-mail: {jiayizhang} @bjtu.edu.cn)."
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.112,
117
+ 0.808,
118
+ 0.883,
119
+ 0.841
120
+ ],
121
+ "angle": 0,
122
+ "content": "B. Ai is with the State Key Laboratory of Rail Traffic Control and Safety, Beijing Jiaotong University, Beijing 100044, China. (e-mail: boai@bjtu.edu.cn)."
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.112,
128
+ 0.847,
129
+ 0.883,
130
+ 0.881
131
+ ],
132
+ "angle": 0,
133
+ "content": "D. W. K. Ng is with the School of Electrical Engineering and Telecommunications, University of New South Wales, NSW 2052, Australia. (e-mail: w.k.ng@unsw.edu.au)."
134
+ },
135
+ {
136
+ "type": "list",
137
+ "bbox": [
138
+ 0.112,
139
+ 0.768,
140
+ 0.883,
141
+ 0.881
142
+ ],
143
+ "angle": 0,
144
+ "content": null
145
+ }
146
+ ],
147
+ [
148
+ {
149
+ "type": "page_number",
150
+ "bbox": [
151
+ 0.872,
152
+ 0.037,
153
+ 0.884,
154
+ 0.049
155
+ ],
156
+ "angle": 0,
157
+ "content": "2"
158
+ },
159
+ {
160
+ "type": "title",
161
+ "bbox": [
162
+ 0.419,
163
+ 0.083,
164
+ 0.579,
165
+ 0.1
166
+ ],
167
+ "angle": 0,
168
+ "content": "I. INTRODUCTION"
169
+ },
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.111,
174
+ 0.113,
175
+ 0.885,
176
+ 0.448
177
+ ],
178
+ "angle": 0,
179
+ "content": "The fifth-generation (5G) wireless network has targeted a 1000-fold increase in network capacity offering ubiquitous wireless connection for at least 100 billion devices worldwide, compared with the previous generations of networks. Recently, with the large-scale commercialization of the fifth-generation (5G) worldwide, the global industry has begun initial research on the next-generation mobile communication technology, i.e., the sixth-generation (6G). One of the key performance indicators for 6G is its extremely massive connectivity for small devices to enable the so-called Internet-of-Everything (IoE) [1]. In practice, most of these IoE devices will be either battery-powered or battery-less due to the associated high-cost of applying conventional power-grid-based solutions. Unfortunately, the use of limited battery power shortens the lifetime of networks degrading the quality of service. Although frequent battery replacement offers an intermediate solution to this problem, a large number of devices in IoE would further lead to exceedingly high labor and material costs. Therefore, advanced energy replenishing solutions are urgently needed to improve the energy supply challenges of future networks."
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.111,
185
+ 0.455,
186
+ 0.887,
187
+ 0.71
188
+ ],
189
+ "angle": 0,
190
+ "content": "Wireless energy transfer (WET) has been proposed to address various practical scenarios where adopting electrical grid is not possible, such as unmanned aerial vehicle (UAV) communications, wireless sensor networks with sensors embedded in challenging environment structures, or inside a human body [2]. By exploiting the far-field radiation properties of electromagnetic (EM) waves, the radio frequency (RF) energy signal radiated by the transmitter can be harvested at the receiver which converts it into electrical energy for future use. However, WET technologies face various technical problems such as large path loss attenuation, challenging energy beam alignment, and inefficient resource allocation. Hence, to fully unlock the potential of WET, it must be combined with other advanced communication technologies and architectures to fully unlock the potential of practical IoE networks."
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.111,
196
+ 0.718,
197
+ 0.885,
198
+ 0.895
199
+ ],
200
+ "angle": 0,
201
+ "content": "The cellular concept has been introduced over half a century for handling a small number of users in a large area. Recently, cell-free (CF) massive MIMO systems have been proposed [3], which advocates the removal of cellular boundaries for supporting the massive number of users. In practice, this paradigm can effectively shorten the distance between communication devices and access points (APs) through distributed deployment, thereby improving communication performance. Specifically, all the APs are connected to a central processing unit (CPU) with high-speed fronthaul links. Different from conventional centralized massive MIMO systems,"
202
+ }
203
+ ],
204
+ [
205
+ {
206
+ "type": "page_number",
207
+ "bbox": [
208
+ 0.873,
209
+ 0.038,
210
+ 0.882,
211
+ 0.047
212
+ ],
213
+ "angle": 0,
214
+ "content": "3"
215
+ },
216
+ {
217
+ "type": "image",
218
+ "bbox": [
219
+ 0.169,
220
+ 0.085,
221
+ 0.833,
222
+ 0.309
223
+ ],
224
+ "angle": 0,
225
+ "content": null
226
+ },
227
+ {
228
+ "type": "image_caption",
229
+ "bbox": [
230
+ 0.161,
231
+ 0.333,
232
+ 0.835,
233
+ 0.352
234
+ ],
235
+ "angle": 0,
236
+ "content": "Fig. 1: Application scenarios of RIS-aided CF massive MIMO systems with WET."
237
+ },
238
+ {
239
+ "type": "text",
240
+ "bbox": [
241
+ 0.111,
242
+ 0.395,
243
+ 0.885,
244
+ 0.649
245
+ ],
246
+ "angle": 0,
247
+ "content": "APs are deployed in a certain range in a distributed manner and cooperate among themselves which offers rich spatial diversity to improve the system spectral efficiency (SE). On the other hand, the use of high-frequency bands, e.g., terahertz (THz), is expected for 6G networks to cope with the aggressive needs required in massive access [4]. Indeed, by further considering the path loss in high-frequency bands, super-dense APs have to be deployed for reducing communication distances and for ensuring line-of-sight (LoS) between APs and IoE devices. In general, to support such large-scale multiple access networks, a large amount of energy would be radiated, while the increased interference imposed by super-dense APs has to be carefully managed and controlled. Therefore, it is imperative to study an innovative, spectrally, and energy-efficient, but low-cost 6G wireless network solution."
248
+ },
249
+ {
250
+ "type": "text",
251
+ "bbox": [
252
+ 0.111,
253
+ 0.658,
254
+ 0.885,
255
+ 0.913
256
+ ],
257
+ "angle": 0,
258
+ "content": "Recently, reconfigurable intelligent surface (RIS) has been proposed as a promising new technology for reconfiguring the wireless propagation environment through software-controlled signal reflection [5]–[7]. Specifically, RIS requires only low power consumption and low cost. Undoubtedly, RIS can address the shortcomings of CF architecture in future communications and these two technologies complement each other. On the other hand, although RIS was initially proposed as a passive component, the coordination of a large number of elements still requires a certain amount of electrical energy [7]. When there are a large number of RISs, configuring a physical link for each RIS would cause huge resource consumption. Therefore, WET technology is an excellent solution to replace the grid energy-based approach. The energy supply of RIS through WET technology is expected to realize passive deployment of RIS, reduce hardware"
259
+ }
260
+ ],
261
+ [
262
+ {
263
+ "type": "page_number",
264
+ "bbox": [
265
+ 0.872,
266
+ 0.038,
267
+ 0.884,
268
+ 0.048
269
+ ],
270
+ "angle": 0,
271
+ "content": "4"
272
+ },
273
+ {
274
+ "type": "text",
275
+ "bbox": [
276
+ 0.111,
277
+ 0.083,
278
+ 0.885,
279
+ 0.365
280
+ ],
281
+ "angle": 0,
282
+ "content": "overhead, and improve the system energy efficiency. Despite its great potential, there are relatively little researches on RIS-aided CF massive MIMO systems at present. In fact, some authors have studied the system performance of a single-RIS system, or optimized communication problems in multiple RIS systems [8]. Also, the authors have studied the combination of RIS and WET and the corresponding optimization through advanced optimization [9]. The results unveiled the non-trivial trade-off between achieving RIS self-sustainability and the system sum-rate. Others have studied the system performance of a single RIS-aided CF massive MIMO systems under idealistic conditions such as with sufficient energy storage and Rayleigh fading channels [10]. Besides, the authors introduced a precoding framework for RIS-aided CF networks [11]. Nevertheless, there is a lack of thorough research on the study of RIS-aided CF massive MIMO systems and their applications with WET technology."
283
+ },
284
+ {
285
+ "type": "text",
286
+ "bbox": [
287
+ 0.111,
288
+ 0.371,
289
+ 0.887,
290
+ 0.626
291
+ ],
292
+ "angle": 0,
293
+ "content": "In this article, we try to answer the question: How to apply WET technology in RIS-aided CF massive MIMO systems? To fully exploit the RIS benefits and the WET technology, we design the system architecture applying the WET technology to RIS and discuss its potential future application scenarios. In addition, we design the transmission procedures of the considered system and analyze each procedure. Based on this, we design different hardware architectures of RIS with WET technology for different practical scenarios. Meanwhile, we propose and compare different operation modes of this system, which provide useful insights for the implication of RIS-aided CF massive MIMO systems. With the novel system architecture, we also discuss how to deploy RIS to achieve better system performance. Finally, we highlight potential research directions that deserve further study."
294
+ },
295
+ {
296
+ "type": "title",
297
+ "bbox": [
298
+ 0.217,
299
+ 0.655,
300
+ 0.776,
301
+ 0.671
302
+ ],
303
+ "angle": 0,
304
+ "content": "II. MAIN APPLICATION SCENARIOS AND SYSTEM ARCHITECTURE"
305
+ },
306
+ {
307
+ "type": "text",
308
+ "bbox": [
309
+ 0.111,
310
+ 0.685,
311
+ 0.885,
312
+ 0.782
313
+ ],
314
+ "angle": 0,
315
+ "content": "In this section, we introduce the main application scenarios of RIS-aided CF massive MIMO systems in future wireless networks and analyze the characteristics of different scenarios. Meanwhile, we provide a detailed introduction to the architecture of the considered system and the corresponding transmission procedure."
316
+ },
317
+ {
318
+ "type": "title",
319
+ "bbox": [
320
+ 0.111,
321
+ 0.814,
322
+ 0.37,
323
+ 0.831
324
+ ],
325
+ "angle": 0,
326
+ "content": "A. Main Application Scenarios"
327
+ },
328
+ {
329
+ "type": "text",
330
+ "bbox": [
331
+ 0.111,
332
+ 0.845,
333
+ 0.885,
334
+ 0.915
335
+ ],
336
+ "angle": 0,
337
+ "content": "In the current research, e.g., [6], [7] as a passive device, RIS can be flexibly deployed in communication systems for improving the coverage area and providing wireless services for dead spots. In future 6G network architecture, RIS should be combined with other technologies"
338
+ }
339
+ ],
340
+ [
341
+ {
342
+ "type": "page_number",
343
+ "bbox": [
344
+ 0.873,
345
+ 0.038,
346
+ 0.882,
347
+ 0.047
348
+ ],
349
+ "angle": 0,
350
+ "content": "5"
351
+ },
352
+ {
353
+ "type": "image",
354
+ "bbox": [
355
+ 0.345,
356
+ 0.083,
357
+ 0.656,
358
+ 0.355
359
+ ],
360
+ "angle": 0,
361
+ "content": null
362
+ },
363
+ {
364
+ "type": "image_caption",
365
+ "bbox": [
366
+ 0.169,
367
+ 0.379,
368
+ 0.825,
369
+ 0.397
370
+ ],
371
+ "angle": 0,
372
+ "content": "Fig. 2: System architecture of RIS-aided CF massive MIMO systems with WET."
373
+ },
374
+ {
375
+ "type": "text",
376
+ "bbox": [
377
+ 0.111,
378
+ 0.441,
379
+ 0.884,
380
+ 0.511
381
+ ],
382
+ "angle": 0,
383
+ "content": "to achieve better communication performance. Our proposed RIS-aided CF massive MIMO communication architecture serves as an excellent candidate for better exploiting the advantages of WET technology and realizing the vision of IoE."
384
+ },
385
+ {
386
+ "type": "text",
387
+ "bbox": [
388
+ 0.111,
389
+ 0.519,
390
+ 0.885,
391
+ 0.907
392
+ ],
393
+ "angle": 0,
394
+ "content": "The future wireless networks are expected to make full use of the low, medium, and high full-spectrum resources to achieve seamless global coverage of space, sky, and earth trinity, such that they can satisfy the stringent demand for establishing unlimited safe and reliable \"human-machine-object\" connections anytime and anywhere. Indeed, the success of this desired vision relies on the support of massive access required by the IoE, requiring higher transmission rates, lower delays, and higher reliability [12]. The main scenes include two categories, densely populated spaces and large-scale factories with densely deployed equipment. As shown in Fig. 1, crowded spaces include large indoor shopping malls, basketball courts, restaurants, stadiums, and so on. In contrast, the scene of a large-scale factory with densely deployed equipment is relatively static and the only main challenge is to facilitate energy harvesting at potential equipment. Note that in the factory scenario, CPU and AP are internal devices rather than external additional devices. The main feature of the former is that the mobility of personnel is relatively high and the wireless equipment in the latter has only limited mobility. For the former scenes, there is generally more uncertainty in wireless communication channels. For example, for the indoor scenario, under the original CF system architecture, we can deploy"
395
+ }
396
+ ],
397
+ [
398
+ {
399
+ "type": "page_number",
400
+ "bbox": [
401
+ 0.873,
402
+ 0.038,
403
+ 0.884,
404
+ 0.048
405
+ ],
406
+ "angle": 0,
407
+ "content": "6"
408
+ },
409
+ {
410
+ "type": "image",
411
+ "bbox": [
412
+ 0.313,
413
+ 0.083,
414
+ 0.675,
415
+ 0.355
416
+ ],
417
+ "angle": 0,
418
+ "content": null
419
+ },
420
+ {
421
+ "type": "image_caption",
422
+ "bbox": [
423
+ 0.112,
424
+ 0.379,
425
+ 0.884,
426
+ 0.397
427
+ ],
428
+ "angle": 0,
429
+ "content": "Fig. 3: Four-stage transmission procedure of RIS-aided CF massive MIMO systems with WET."
430
+ },
431
+ {
432
+ "type": "text",
433
+ "bbox": [
434
+ 0.112,
435
+ 0.445,
436
+ 0.885,
437
+ 0.701
438
+ ],
439
+ "angle": 0,
440
+ "content": "multiple RISs in some communication dead zones and improve the quality of communication by increasing the number of RISs. Since WET technology is exploited to supply the necessary energy to RISs, there is no need to deploy physical power lines for charging which is more flexible in practical implementation. Moreover, in the indoor environment, there is an upsurge in the demand for temporary communication such as large-scale activities in which the role of flexible RIS deployment is particularly prominent. As for outdoor environments such as the gymnasium, we can deploy an unmanned aerial vehicle (UAV) on-demand as a carrier of RIS to offer signal coverage and enhancement through intelligent design of the track. On the other hand, for the latter scenes that equipment is not mobile, we only need to deploy the RIS in a fixed location in advance to improve the system performance."
441
+ },
442
+ {
443
+ "type": "title",
444
+ "bbox": [
445
+ 0.114,
446
+ 0.732,
447
+ 0.304,
448
+ 0.749
449
+ ],
450
+ "angle": 0,
451
+ "content": "B. System Architecture"
452
+ },
453
+ {
454
+ "type": "text",
455
+ "bbox": [
456
+ 0.112,
457
+ 0.763,
458
+ 0.886,
459
+ 0.913
460
+ ],
461
+ "angle": 0,
462
+ "content": "As shown in Fig. 2, the system is based on the CF architecture with an \"additional\" RIS layer between the user equipments (UEs) and the APs. The existing CF massive MIMO system is a three-tier structure composed of the CPU, APs, and UEs. Our proposed system architecture realizes a 3.5-layer architecture, which adds a cascading link through RIS. When there is a direct path, the APs can receive the signal from the UEs via two uplink paths: the direct link and the aggregated link through the RIS. As such, we treat the channels through the RIS with"
463
+ }
464
+ ],
465
+ [
466
+ {
467
+ "type": "page_number",
468
+ "bbox": [
469
+ 0.872,
470
+ 0.037,
471
+ 0.884,
472
+ 0.049
473
+ ],
474
+ "angle": 0,
475
+ "content": "7"
476
+ },
477
+ {
478
+ "type": "text",
479
+ "bbox": [
480
+ 0.111,
481
+ 0.083,
482
+ 0.884,
483
+ 0.129
484
+ ],
485
+ "angle": 0,
486
+ "content": "an additional 0.5 layer. In particular, when the direct path is blocked, this proposed structure can still guarantee stable user communication."
487
+ },
488
+ {
489
+ "type": "text",
490
+ "bbox": [
491
+ 0.111,
492
+ 0.136,
493
+ 0.885,
494
+ 0.339
495
+ ],
496
+ "angle": 0,
497
+ "content": "The CPU has an extremely high computational capability, which not only can receive or transmit a large amount of information but also process the large number of AP receiving data [13]. In contrast, the AP has limited computing resources which are equipped with a simple radio frequency antenna. In the considered system, the difference between the proposed RIS and the traditional one is that our proposed one allows RIS to be equipped with a wireless energy harvesting module, which replaces the original wired-power circuit. By adopting the WET technology, AP is considered as an energy source to charge the RIS module wirelessly to ensure the normal operation of RIS."
498
+ },
499
+ {
500
+ "type": "text",
501
+ "bbox": [
502
+ 0.111,
503
+ 0.346,
504
+ 0.887,
505
+ 0.813
506
+ ],
507
+ "angle": 0,
508
+ "content": "To realize this paradigm, we introduce the flow of the four-stage transmission procedure of the system in Fig. 3. Stage I: Downlink energy transmission: The CPU collects the information transmitted by the AP via the fronthaul and sends control commands to the AP after signal processing. Based on the received signal, AP decides whether to transmit wireless energy signals to the RIS for energy harvesting. Especially, when the stored energy level in the RIS exceeds a threshold value, the RIS controller transmits a feedback signal to the AP for terminating the energy transmission. Stage II: Data transmission: In the uplink, the UE first sends a pilot signal to the APs which reaches the APs directly or through the RISs cascaded channel. Then the APs receive the pilot signal and convey it to the CPU via dedicated fronthaul for channel estimation. Subsequently, the UE sends their uplink data and the APs receive the signal via a direct path as well as the reflected signal arriving through RISs beamforming. Besides, the APs send their received data to the CPU, which performs joint signal processing for data detection. Finally, the CPU obtains the uplink decoded signal based on the global channel estimation. Then the downlink data signal is generated by the downlink precoder and transmitted to the AP, which finally reaches the UEs through the RIS. Meanwhile, AP generates the signal to control the RIS for phases adaptation. Note that if the AP desires to realize dynamic control to RIS, it is necessary to modify the frame structure and to insert some control time slots. Correspondingly, deploying signal processing modules at RIS may be needed to respond to the control signals."
509
+ },
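A minimal sketch of the Stage I charging handshake described above: the AP keeps transmitting energy until the RIS battery crosses a threshold, at which point the RIS controller's feedback terminates WET. The per-slot harvest amount, threshold, and slot budget are illustrative assumptions.

```python
# Hedged sketch of the Stage I energy-transmission loop with RIS feedback.

def charge_ris(harvest_per_slot, threshold, battery=0.0, max_slots=100):
    """Return (final battery level, slots used) for one charging phase."""
    for slot in range(1, max_slots + 1):
        battery += harvest_per_slot        # AP transmits, RIS harvests
        if battery >= threshold:           # RIS feedback terminates WET
            return battery, slot
    return battery, max_slots              # budget exhausted before threshold

print(charge_ris(harvest_per_slot=0.3, threshold=2.0))
```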
510
+ {
511
+ "type": "title",
512
+ "bbox": [
513
+ 0.382,
514
+ 0.839,
515
+ 0.613,
516
+ 0.856
517
+ ],
518
+ "angle": 0,
519
+ "content": "III. DEPLOYMENT DESIGN"
520
+ },
521
+ {
522
+ "type": "text",
523
+ "bbox": [
524
+ 0.111,
525
+ 0.87,
526
+ 0.884,
527
+ 0.915
528
+ ],
529
+ "angle": 0,
530
+ "content": "In this section, we propose a RIS hardware design scheme supported by the WET technology. On this basis, we explore different system operation modes and compare their advantages and"
531
+ }
532
+ ],
533
+ [
534
+ {
535
+ "type": "page_number",
536
+ "bbox": [
537
+ 0.872,
538
+ 0.038,
539
+ 0.882,
540
+ 0.048
541
+ ],
542
+ "angle": 0,
543
+ "content": "8"
544
+ },
545
+ {
546
+ "type": "text",
547
+ "bbox": [
548
+ 0.111,
549
+ 0.083,
550
+ 0.885,
551
+ 0.128
552
+ ],
553
+ "angle": 0,
554
+ "content": "disadvantages. Finally, we discuss various practical scenarios on how to effectively determine the location of RIS deployments in practice."
555
+ },
556
+ {
557
+ "type": "title",
558
+ "bbox": [
559
+ 0.111,
560
+ 0.16,
561
+ 0.287,
562
+ 0.178
563
+ ],
564
+ "angle": 0,
565
+ "content": "A. Hardware Design"
566
+ },
567
+ {
568
+ "type": "text",
569
+ "bbox": [
570
+ 0.111,
571
+ 0.19,
572
+ 0.885,
573
+ 0.472
574
+ ],
575
+ "angle": 0,
576
+ "content": "The hardware implementation of RIS is based on the concept of \"metasurface\", each element of which is a programmable sub-wavelength structural unit composed of two-dimensional metamaterials [14]. In practice, the field-programmable gate array (FPGA) can be used as a controller to achieve flexible control of the RIS, which usually communicates and coordinates with other network components (e.g., BS, APs, and UEs) through dedicated links. Although FPGA consumes a small amount of energy, it still needs some electrical power source to support its operation. As shown in Fig. 4, we introduce the wireless energy scavenging module to the original RIS panel and exploit some elements for energy reception and other elements for signal reflection. The energy harvesting elements are connected with a piece of energy storage hardware (e.g., a rechargeable battery), which can store the harvested energy and support the energy consumption of other elements performing reflection."
577
+ },
578
+ {
579
+ "type": "text",
580
+ "bbox": [
581
+ 0.111,
582
+ 0.479,
583
+ 0.887,
584
+ 0.892
585
+ ],
586
+ "angle": 0,
587
+ "content": "RIS is generally assembled by hundreds of elements [6], so it is worth exploring which elements are selected for serving as energy receiving modules. Here, we have designed and compared three types of hardware structures in Fig. 4. The first type consists of a complete separation of the energy harvesting elements and the information reflecting elements which are easy to implement in hardware. Yet, when the RIS panel is large, there would be some energy reception dead spots in this design due to non-uniform energy flux created by impinging signals. Based on this, we further propose an improved block structure, i.e., deployment energy harvesting elements at the four corners of the RIS, i.e., type 2, which can alleviate the impacts of dead spots but introduce moderate hardware implementation difficulty. Finally, we also design a scattered structure that aims at reducing the impacts caused by dead zones for energy harvesting or signal reflection, but at the cost of higher hardware complexity. In practice, the energy consumption of the centralized and distributed element designs mainly depends on the number of elements used for information reflection. Indeed, in addition to considering the balance between the system performance and the implementation complexity, the ratio between the number of RIS elements in energy harvesting mode and that in reflecting mode is another key issue. This is determined by various factors, such as distance, energy conversion efficiency, and the channel environment."
588
+ }
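A hedged sketch of the three element layouts just described: boolean masks marking which elements of an N x N panel harvest energy, for the separated-block, corner-block, and scattered structures of Fig. 4. The panel size and the roughly one-in-four harvesting ratio are illustrative assumptions, not the paper's parameters.

```python
# Hedged sketch: element-assignment masks for the three Fig. 4 layouts.
# True marks an energy harvesting element; False marks a reflecting element.
import numpy as np

def type1(n):                       # fully separated block (left quarter)
    m = np.zeros((n, n), dtype=bool)
    m[:, : n // 4] = True
    return m

def type2(n, k):                    # k x k harvesting blocks at the corners
    m = np.zeros((n, n), dtype=bool)
    for r in (slice(0, k), slice(n - k, n)):
        for c in (slice(0, k), slice(n - k, n)):
            m[r, c] = True
    return m

def type3(n):                       # scattered, interleaved distribution
    i, j = np.indices((n, n))
    return (i + j) % 4 == 0         # roughly one element in four harvests

for m in (type1(8), type2(8, 2), type3(8)):
    print(m.astype(int), m.mean(), sep="\n")
```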
589
+ ],
590
+ [
591
+ {
592
+ "type": "page_number",
593
+ "bbox": [
594
+ 0.873,
595
+ 0.038,
596
+ 0.884,
597
+ 0.048
598
+ ],
599
+ "angle": 0,
600
+ "content": "9"
601
+ },
602
+ {
603
+ "type": "text",
604
+ "bbox": [
605
+ 0.112,
606
+ 0.083,
607
+ 0.884,
608
+ 0.128
609
+ ],
610
+ "angle": 0,
611
+ "content": "In practice, the information and energy elements ratios can be adaptively adjusted according to the feedback of the actual parameters to realize dynamic assignment [9]."
612
+ },
613
+ {
614
+ "type": "image",
615
+ "bbox": [
616
+ 0.203,
617
+ 0.154,
618
+ 0.365,
619
+ 0.297
620
+ ],
621
+ "angle": 0,
622
+ "content": null
623
+ },
624
+ {
625
+ "type": "image",
626
+ "bbox": [
627
+ 0.369,
628
+ 0.153,
629
+ 0.647,
630
+ 0.299
631
+ ],
632
+ "angle": 0,
633
+ "content": null
634
+ },
635
+ {
636
+ "type": "image",
637
+ "bbox": [
638
+ 0.672,
639
+ 0.153,
640
+ 0.798,
641
+ 0.299
642
+ ],
643
+ "angle": 0,
644
+ "content": null
645
+ },
646
+ {
647
+ "type": "image_caption",
648
+ "bbox": [
649
+ 0.111,
650
+ 0.319,
651
+ 0.889,
652
+ 0.442
653
+ ],
654
+ "angle": 0,
655
+ "content": "Fig. 4: Hardware designs for information and energy transmission in a RIS. Type 1: Energy harvesting and information reflecting elements are completely separated and distributed in blocks. Type 2: Block structure with incomplete separation of energy harvesting and information reflection elements. Type 3: Energy harvesting and information reflecting elements alternate with each other in a scattered distribution."
656
+ },
657
+ {
658
+ "type": "title",
659
+ "bbox": [
660
+ 0.114,
661
+ 0.494,
662
+ 0.285,
663
+ 0.51
664
+ ],
665
+ "angle": 0,
666
+ "content": "B. Operation Modes"
667
+ },
668
+ {
669
+ "type": "text",
670
+ "bbox": [
671
+ 0.111,
672
+ 0.525,
673
+ 0.884,
674
+ 0.646
675
+ ],
676
+ "angle": 0,
677
+ "content": "In practice, there are different ways for deploying RISs leading to different system operation modes. They affect the hardware design complexity and signal processing methods, as well as the synergy between individual communication devices, which inevitably brings different system performances. In this section, we discuss several different operation modes of RIS for WET and information reflection."
678
+ },
679
+ {
680
+ "type": "text",
681
+ "bbox": [
682
+ 0.112,
683
+ 0.656,
684
+ 0.885,
685
+ 0.911
686
+ ],
687
+ "angle": 0,
688
+ "content": "1) Centralized RIS: As shown in Fig. 6, we first consider that there is only a single large-scale centralized RIS with massive elements. It is configured with high computational power and signal processing capability at the RIS controller, which can reduce the processing burden of the AP. Meanwhile, the centralized RIS has high beamforming capability that can assist the APs to service massive UEs via excellent interference management [9]. Moreover, the centralized design facilitates highly efficient wireless energy focusing, while it is easier to deploy and maintain the equipment. However, due to the architecture of centralized RIS, the path loss of the long-distance devices can be severe as UEs are randomly deployed. Also, the ability of WET to keep such a sophisticated controller functioning properly still needs to be explored. Besides, the requirements for hardware design are also demanding resulting in high cost."
689
+ }
690
+ ],
691
+ [
692
+ {
693
+ "type": "page_number",
694
+ "bbox": [
695
+ 0.868,
696
+ 0.037,
697
+ 0.884,
698
+ 0.047
699
+ ],
700
+ "angle": 0,
701
+ "content": "10"
702
+ },
703
+ {
704
+ "type": "text",
705
+ "bbox": [
706
+ 0.111,
707
+ 0.083,
708
+ 0.885,
709
+ 0.311
710
+ ],
711
+ "angle": 0,
712
+ "content": "2) Non-cooperative Distributed RIS: In this case, we assume that multiple RISs have noncooperatively deployed in a service area. This distributed deployment is more flexible and can avoid dead spots in WET as much as possible. Meanwhile, the distance between RIS-AP and RIS-UE is shortened that reducing the path loss of WET. As such, significant improvements in energy efficiency and communication quality can be achieved. In practice, the hardware design of each RIS is still relatively simple since it only expects to implement simple signal reflection. Besides, since multiple non-cooperative RISs are distributed in the service area, the signals between different RISs may interfere with each other, which degrades the communication performance of UEs."
713
+ },
714
+ {
715
+ "type": "text",
716
+ "bbox": [
717
+ 0.111,
718
+ 0.319,
719
+ 0.887,
720
+ 0.735
721
+ ],
722
+ "angle": 0,
723
+ "content": "3) Cooperative Distributed RIS: Similar to the last operation mode, we consider that there are multiple RISs in a service area. The difference is that there is some intelligent cooperation among RISs through physical links or dedicated wireless communication as shown in Fig. 6. Note that when there is a physical link connected to the RIS, WET is regarded as supplementary electric energy to reduce the required energy consumption. This operation mode not only reduces the path loss of WET and information communication but also enables better beam management among multiple RISs through cooperation thus reducing interference. However, it also places additional requirements on the hardware design of RISs, such as how to implement physical connections between multiple RISs and the design of the controller. As such, RIS will generate additional energy consumption for channel estimation and cooperation. However, due to the stable characteristics of the channel, frequent communication between RISs is not required, so the required energy consumption is still reasonable. In practice, a high computational requirement at the CPU and advanced optimization algorithms are needed to achieve efficient and intelligent cooperation of RISs. In practice, centralized RIS is more suitable for small-scale networks with dense users. In contrast, distributed RIS has better performance in large-scale networks due to its inherent ability in exploiting spatial diversities."
724
+ },
725
+ {
726
+ "type": "text",
727
+ "bbox": [
728
+ 0.111,
729
+ 0.739,
730
+ 0.885,
731
+ 0.915
732
+ ],
733
+ "angle": 0,
734
+ "content": "In Fig. 5, we compare different RIS operation modes for different element numbers within an area of \\(1000\\mathrm{m}^2\\) and consider the effect of phase errors. The achievable rate (bps/Hz) and the transmit power (dBm) are the two main performance metrics for information communication. It shows that RIS can significantly improve the system performance and compared with centralized RIS, distributed deployment can obtain higher system achievable rate. Moreover, phase errors reduce the performance of the system and the influence of phase errors is more significant with the increase of the RIS element numbers."
735
+ }
736
+ ],
737
+ [
738
+ {
739
+ "type": "page_number",
740
+ "bbox": [
741
+ 0.868,
742
+ 0.038,
743
+ 0.882,
744
+ 0.048
745
+ ],
746
+ "angle": 0,
747
+ "content": "11"
748
+ },
749
+ {
750
+ "type": "image",
751
+ "bbox": [
752
+ 0.361,
753
+ 0.093,
754
+ 0.637,
755
+ 0.265
756
+ ],
757
+ "angle": 0,
758
+ "content": null
759
+ },
760
+ {
761
+ "type": "image_caption",
762
+ "bbox": [
763
+ 0.132,
764
+ 0.289,
765
+ 0.856,
766
+ 0.308
767
+ ],
768
+ "angle": 0,
769
+ "content": "Fig. 5: Achievable rate of different RIS operation modes and elements with phase errors."
770
+ },
771
+ {
772
+ "type": "image",
773
+ "bbox": [
774
+ 0.231,
775
+ 0.332,
776
+ 0.77,
777
+ 0.464
778
+ ],
779
+ "angle": 0,
780
+ "content": null
781
+ },
782
+ {
783
+ "type": "image_caption",
784
+ "bbox": [
785
+ 0.279,
786
+ 0.489,
787
+ 0.719,
788
+ 0.509
789
+ ],
790
+ "angle": 0,
791
+ "content": "Fig. 6: Comparison of different RIS operation modes."
792
+ },
793
+ {
794
+ "type": "title",
795
+ "bbox": [
796
+ 0.114,
797
+ 0.551,
798
+ 0.279,
799
+ 0.568
800
+ ],
801
+ "angle": 0,
802
+ "content": "C. RIS Deployment"
803
+ },
804
+ {
805
+ "type": "text",
806
+ "bbox": [
807
+ 0.111,
808
+ 0.582,
809
+ 0.884,
810
+ 0.756
811
+ ],
812
+ "angle": 0,
813
+ "content": "How to judiciously deploy multiple RISs in a hybrid wireless network containing passive RISs, active CPUs, and APs to optimize the system performance is a critical problem that needs to be solved. Previous studies have provided some solutions for RIS location deployment, such as deploying a RIS close to the UE or the AP side can obtain higher system performance [15]. Unfortunately, the considered system is not a single point-to-point communication and the RIS needs support from the WET technology. In the considered network, energy and information transmission are two equally important performance metrics."
814
+ },
815
+ {
816
+ "type": "text",
817
+ "bbox": [
818
+ 0.111,
819
+ 0.766,
820
+ 0.885,
821
+ 0.915
822
+ ],
823
+ "angle": 0,
824
+ "content": "First, from the perspective of optimizing the system communication performance, the RIS should be intuitively deployed in a location with a clear LoS from the AP to maximize its received signal power for passive beamforming and WET. Although this deployment scheme enjoys good performance for the case of a single UE, it does not work well when the number of UEs increases, due to the lack of enough non-LoS paths to achieve high-rank MIMO channels. To enhance the experience of UEs with poor communication quality, we should also deploy"
825
+ }
826
+ ],
827
+ [
828
+ {
829
+ "type": "page_number",
830
+ "bbox": [
831
+ 0.868,
832
+ 0.037,
833
+ 0.882,
834
+ 0.048
835
+ ],
836
+ "angle": 0,
837
+ "content": "12"
838
+ },
839
+ {
840
+ "type": "text",
841
+ "bbox": [
842
+ 0.111,
843
+ 0.083,
844
+ 0.885,
845
+ 0.234
846
+ ],
847
+ "angle": 0,
848
+ "content": "some RISs in dedicated areas with weak signal coverage and communication dead zones caused by blockages. On the other hand, from the perspective of WET, RIS should be deployed in scenarios where it is difficult to install powerlines for emergencies, such as UAV communication and temporary placement sites. In practice, ensuring proper system operation first requires that the RIS can effectively receive wireless energy signals to support its operation. As such, a RIS should be reasonably deployed near an AP to facilitate the establishment of a LoS path."
849
+ },
850
+ {
851
+ "type": "text",
852
+ "bbox": [
853
+ 0.111,
854
+ 0.24,
855
+ 0.887,
856
+ 0.709
857
+ ],
858
+ "angle": 0,
859
+ "content": "In practice, the dynamic of propagation environment and user mobility lead to channel variations over time and each RIS may also be associated with multiple APs and UEs while multiple RISs may be interconnected. In such cases, heuristic solutions to the design of RISs deployment may be ineffective. In particular, it is generally intractable to acquire accurate globally channel state information (CSI) at a low cost in large-size systems. Therefore, how to determine a precise location deployment scheme of multiple RISs based on partial system information is a new problem with high practical significance. A promising approach to solve this problem is to utilize machine learning techniques, such as deep learning (DL). For example, in the training phase, we can empirically deploy multiple RISs at reasonable reference locations and collect key performance metrics, such as the received signal strength and the corresponding energy efficiency measured at different UE locations. The collected key metrics and the location of the RIS are then exploited to train the DL-based neural network as an input and an output, respectively. However, during the training process, it is generally not possible to change the locations of RISs and additional installation costs will be incurred. In fact, the obtained results serve as a performance upper bound for a fixed RIS deployment location. As for more practical implementation, one may consider statical long-term optimization. Besides, we can adopt convex optimization methods such as gradient descent method and Newton method to optimize RIS location and system power allocation to improve the system performance."
860
+ },
861
+ {
862
+ "type": "title",
863
+ "bbox": [
864
+ 0.388,
865
+ 0.733,
866
+ 0.607,
867
+ 0.751
868
+ ],
869
+ "angle": 0,
870
+ "content": "IV. FUTURE DIRECTIONS"
871
+ },
872
+ {
873
+ "type": "text",
874
+ "bbox": [
875
+ 0.113,
876
+ 0.764,
877
+ 0.885,
878
+ 0.81
879
+ ],
880
+ "angle": 0,
881
+ "content": "In addition to the system architecture and deployment schemes discussed above, there still are some other research directions worthy of future investigation for the systems."
882
+ },
883
+ {
884
+ "type": "title",
885
+ "bbox": [
886
+ 0.113,
887
+ 0.84,
888
+ 0.273,
889
+ 0.857
890
+ ],
891
+ "angle": 0,
892
+ "content": "A. Control Scheme"
893
+ },
894
+ {
895
+ "type": "text",
896
+ "bbox": [
897
+ 0.112,
898
+ 0.871,
899
+ 0.885,
900
+ 0.915
901
+ ],
902
+ "angle": 0,
903
+ "content": "For a single RIS, how to control the activation of energy harvesting and information reflection elements is an important problem to be solved with the following aspects. For instance, stringent"
904
+ }
905
+ ],
906
+ [
907
+ {
908
+ "type": "page_number",
909
+ "bbox": [
910
+ 0.868,
911
+ 0.037,
912
+ 0.882,
913
+ 0.048
914
+ ],
915
+ "angle": 0,
916
+ "content": "13"
917
+ },
918
+ {
919
+ "type": "text",
920
+ "bbox": [
921
+ 0.111,
922
+ 0.083,
923
+ 0.885,
924
+ 0.391
925
+ ],
926
+ "angle": 0,
927
+ "content": "requirements are imposed for the hardware design of RIS while higher information receiving and processing capabilities are also required. Besides, the massive elements in RIS incur new challenges to the optimization algorithm of high-dimensional matrices [9]. As for the systems, the traditional analysis results in the literature do not consider the cooperation among RISs, which may lead to UE unfairness and communication quality degradation due to signal interference among multiple RISs. To fully utilize the advantage of the strong directivity of the RIS reflected beam, the RIS phase shift matrix is designed according to the AP and UE positions to achieve precise control of the beam direction, thereby avoiding interference as much as possible. For example, with a large number of APs and RISs, computationally-efficient distributed machine learning algorithms serve as appealing solutions as they have been successfully applied to address large-scale optimization problems. However, a rigorous control protocol must be designed to ensure orderly communication and harness the interference among devices."
928
+ },
929
+ {
930
+ "type": "title",
931
+ "bbox": [
932
+ 0.114,
933
+ 0.42,
934
+ 0.306,
935
+ 0.436
936
+ ],
937
+ "angle": 0,
938
+ "content": "B. Resource Allocation"
939
+ },
940
+ {
941
+ "type": "text",
942
+ "bbox": [
943
+ 0.111,
944
+ 0.45,
945
+ 0.889,
946
+ 0.812
947
+ ],
948
+ "angle": 0,
949
+ "content": "Large and complex systems inevitably face the problem of resource allocation. In traditional CF systems, pilot assignment is an important research direction that is also inherent to our system. Besides, for the consideration of supporting WET technology, power allocation is another key issue worthy of attention. Specifically, we need to control the energy supply offered by APs to optimize the system energy efficiency. If all the APs adopt a uniform power allocation to supply energy to RISs, it is generally suboptimal which can cause energy waste. As such, certain optimization mechanisms are needed to maximize power utilization. For instance, for imperfect CSI, we can exploit the large-scale channel information and solve the related resource allocation problem through optimization algorithms such as geometric programming. In practice, formulating design optimization problems to realize accurate and efficient power allocation is an ideal solution. However, most of the practical problems are non-convex and intractable in complex networks. Hence, finding an optimal power control scheme within a reasonable timescale is an urgent future research direction. It is worth mentioning that using the WET technology to supply energy to UEs is also worth considering in resource allocation."
950
+ },
951
+ {
952
+ "type": "title",
953
+ "bbox": [
954
+ 0.114,
955
+ 0.84,
956
+ 0.225,
957
+ 0.856
958
+ ],
959
+ "angle": 0,
960
+ "content": "C. Hardware"
961
+ },
962
+ {
963
+ "type": "text",
964
+ "bbox": [
965
+ 0.111,
966
+ 0.871,
967
+ 0.885,
968
+ 0.915
969
+ ],
970
+ "angle": 0,
971
+ "content": "To achieve ubiquitous deployment of RIS-aided CF massive MIMO systems, the cost and quality of hardware for receivers and transmitters as well as RIS naturally swing a non-trivial"
972
+ }
973
+ ],
974
+ [
975
+ {
976
+ "type": "page_number",
977
+ "bbox": [
978
+ 0.868,
979
+ 0.038,
980
+ 0.882,
981
+ 0.047
982
+ ],
983
+ "angle": 0,
984
+ "content": "14"
985
+ },
986
+ {
987
+ "type": "text",
988
+ "bbox": [
989
+ 0.111,
990
+ 0.083,
991
+ 0.885,
992
+ 0.417
993
+ ],
994
+ "angle": 0,
995
+ "content": "trade-off in system design. Yet, to overcome the associated high cost, we typically utilize low-cost components at APs. Unfortunately, the use of low-cost hardware at RIS affects the hardware accuracy, including the limited phase shift resolution and the mutual coupling of phase and incidence angles in each RIS element. In practice, the non-linear amplitude and phase response of RIS would affect the accuracy of reflected signals, resulting in unsatisfactory system performance. As a result, a pragmatic system design taking into the potential hardware imperfect is necessary. Besides, the hardware design of energy reception introduces extra hardware complexity in fabricating RIS. Indeed, designing WET elements at the wavelength level is challenging. In addition, how to avoid the coupling interference between the WET elements and the reflecting elements is also a problem that needs to be considered. On the other hand, the fronthaul capacity limitation reduces the availability of converging control signals between the APs and the CPU. As such, the above hardware impairment problems would definitely degrade the communication and energy transmission performance of the system that are important issues for investigation."
996
+ },
997
+ {
998
+ "type": "title",
999
+ "bbox": [
1000
+ 0.421,
1001
+ 0.44,
1002
+ 0.576,
1003
+ 0.456
1004
+ ],
1005
+ "angle": 0,
1006
+ "content": "V. CONCLUSIONS"
1007
+ },
1008
+ {
1009
+ "type": "text",
1010
+ "bbox": [
1011
+ 0.111,
1012
+ 0.47,
1013
+ 0.884,
1014
+ 0.646
1015
+ ],
1016
+ "angle": 0,
1017
+ "content": "In this article, we investigate the promising RIS-aided CF massive MIMO system with WET technology for realizing IoE in future wireless networks. First, we discussed several potential application scenarios of the system and the proposed system architecture. Besides, we proposed different operation modes and shed light on the suitable RIS hardware design for WET. In addition, we investigated feasible solutions for the possible deployment of RISs in the system. Finally, to offer useful guidance for future research, we indicated the critical challenges and promising research directions for realizing RIS-aided CF massive MIMO systems with WET."
1018
+ },
1019
+ {
1020
+ "type": "title",
1021
+ "bbox": [
1022
+ 0.442,
1023
+ 0.67,
1024
+ 0.555,
1025
+ 0.685
1026
+ ],
1027
+ "angle": 0,
1028
+ "content": "REFERENCES"
1029
+ },
1030
+ {
1031
+ "type": "ref_text",
1032
+ "bbox": [
1033
+ 0.12,
1034
+ 0.701,
1035
+ 0.884,
1036
+ 0.737
1037
+ ],
1038
+ "angle": 0,
1039
+ "content": "[1] H. Tataria, M. Shafi, A. F. Molisch, M. Dohler, H. Sjoland, and F. Tufvesson, “6G wireless systems: Vision, requirements, challenges, insights, and opportunities,” Proc. IEEE, vol. 109, no. 7, pp. 1166–1199, Jul. 2021."
1040
+ },
1041
+ {
1042
+ "type": "ref_text",
1043
+ "bbox": [
1044
+ 0.12,
1045
+ 0.74,
1046
+ 0.885,
1047
+ 0.775
1048
+ ],
1049
+ "angle": 0,
1050
+ "content": "[2] J. Hu, Q. Wang, and K. Yang, “Energy self-sustainability in full-spectrum 6G,” IEEE Wireless Commun., vol. 28, no. 1, pp. 104–111, Jan. 2020."
1051
+ },
1052
+ {
1053
+ "type": "ref_text",
1054
+ "bbox": [
1055
+ 0.12,
1056
+ 0.78,
1057
+ 0.885,
1058
+ 0.814
1059
+ ],
1060
+ "angle": 0,
1061
+ "content": "[3] J. Zhang, E. Björnson, M. Matthaiou, D. W. K. Ng, H. Yang, and D. J. Love, “Prospective multiple antenna technologies for beyond 5G,” IEEE J. Sel. Areas Commun., vol. 38, no. 8, pp. 1637–1660, Aug. 2020."
1062
+ },
1063
+ {
1064
+ "type": "ref_text",
1065
+ "bbox": [
1066
+ 0.12,
1067
+ 0.82,
1068
+ 0.885,
1069
+ 0.855
1070
+ ],
1071
+ "angle": 0,
1072
+ "content": "[4] S. Chen, J. Zhang, E. Björnson, J. Zhang, and B. Ai, “Structured massive access for scalable cell-free massive MIMO systems,” IEEE J. Sel. Areas Commun., vol. 39, no. 4, pp. 1086–1100, Apr. 2020."
1073
+ },
1074
+ {
1075
+ "type": "ref_text",
1076
+ "bbox": [
1077
+ 0.12,
1078
+ 0.859,
1079
+ 0.885,
1080
+ 0.913
1081
+ ],
1082
+ "angle": 0,
1083
+ "content": "[5] W. Tang, M. Z. Chen, J. Y. Dai, Y. Zeng, X. Zhao, S. Jin, Q. Cheng, and T. J. Cui, \"Wireless communications with programmable metasurface: New paradigms, opportunities, and challenges on transceiver design,\" IEEE Wireless Commun., vol. 27, no. 2, pp. 180-187, Feb. 2020."
1084
+ },
1085
+ {
1086
+ "type": "list",
1087
+ "bbox": [
1088
+ 0.12,
1089
+ 0.701,
1090
+ 0.885,
1091
+ 0.913
1092
+ ],
1093
+ "angle": 0,
1094
+ "content": null
1095
+ }
1096
+ ],
1097
+ [
1098
+ {
1099
+ "type": "page_number",
1100
+ "bbox": [
1101
+ 0.868,
1102
+ 0.038,
1103
+ 0.882,
1104
+ 0.047
1105
+ ],
1106
+ "angle": 0,
1107
+ "content": "15"
1108
+ },
1109
+ {
1110
+ "type": "ref_text",
1111
+ "bbox": [
1112
+ 0.12,
1113
+ 0.084,
1114
+ 0.885,
1115
+ 0.119
1116
+ ],
1117
+ "angle": 0,
1118
+ "content": "[6] Q. Wu and R. Zhang, \"Towards smart and reconfigurable environment: Intelligent reflecting surface aided wireless network,\" IEEE Commun. Mag., vol. 58, no. 1, pp. 106-112, Jan. 2019."
1119
+ },
1120
+ {
1121
+ "type": "ref_text",
1122
+ "bbox": [
1123
+ 0.12,
1124
+ 0.124,
1125
+ 0.884,
1126
+ 0.179
1127
+ ],
1128
+ "angle": 0,
1129
+ "content": "[7] M. Di Renzo, A. Zappone, M. Debbah, M.-S. Alouini, C. Yuen, J. De Rosny, and S. Tretyakov, “Smart radio environments empowered by reconfigurable intelligent surfaces: How it works, state of research, and the road ahead,” IEEE J. Sel. Areas Commun., vol. 38, no. 11, pp. 2450–2525, Nov. 2020."
1130
+ },
1131
+ {
1132
+ "type": "ref_text",
1133
+ "bbox": [
1134
+ 0.12,
1135
+ 0.184,
1136
+ 0.885,
1137
+ 0.22
1138
+ ],
1139
+ "angle": 0,
1140
+ "content": "[8] Q. Wu and R. Zhang, \"Joint active and passive beamforming optimization for intelligent reflecting surface assisted SWIPT under QoS constraints,\" IEEE J. Sel. Areas Commun., vol. 38, no. 8, pp. 1735-1748, Aug. 2020."
1141
+ },
1142
+ {
1143
+ "type": "ref_text",
1144
+ "bbox": [
1145
+ 0.12,
1146
+ 0.224,
1147
+ 0.885,
1148
+ 0.259
1149
+ ],
1150
+ "angle": 0,
1151
+ "content": "[9] S. Hu, Z. Wei, Y. Cai, C. Liu, D. W. K. Ng, and J. Yuan, \"Robust and secure sum-rate maximization for multiuser MISO downlink systems with self-sustainable IRS,\" IEEE Trans. Commun., early access, 2021."
1152
+ },
1153
+ {
1154
+ "type": "ref_text",
1155
+ "bbox": [
1156
+ 0.115,
1157
+ 0.263,
1158
+ 0.882,
1159
+ 0.298
1160
+ ],
1161
+ "angle": 0,
1162
+ "content": "[10] T. Van Chien, H. Q. Ngo, S. Chatzinotas, M. Di Renzo, and B. Ottersten, \"Reconfigurable intelligent surface-assisted cell-free massive MIMO systems over spatially-correlated channels,\" arXiv preprint arXiv:2104.08648, 2021."
1163
+ },
1164
+ {
1165
+ "type": "ref_text",
1166
+ "bbox": [
1167
+ 0.115,
1168
+ 0.303,
1169
+ 0.882,
1170
+ 0.338
1171
+ ],
1172
+ "angle": 0,
1173
+ "content": "[11] Z. Zhang and L. Dai, \"A joint precoding framework for wideband reconfigurable intelligent surface-aided cell-free network,\" IEEE Trans. Signal Process., vol. 69, pp. 4085-4101, 2021."
1174
+ },
1175
+ {
1176
+ "type": "ref_text",
1177
+ "bbox": [
1178
+ 0.115,
1179
+ 0.342,
1180
+ 0.882,
1181
+ 0.377
1182
+ ],
1183
+ "angle": 0,
1184
+ "content": "[12] M. Giordani, M. Polese, M. Mezzavilla, S. Rangan, and M. Zorzi, “Toward 6G networks: Use cases and technologies,” IEEE Commun. Mag., vol. 58, no. 3, pp. 55–61, Mar. 2020."
1185
+ },
1186
+ {
1187
+ "type": "ref_text",
1188
+ "bbox": [
1189
+ 0.115,
1190
+ 0.382,
1191
+ 0.882,
1192
+ 0.417
1193
+ ],
1194
+ "angle": 0,
1195
+ "content": "[13] J. Zhang, J. Zhang, D. W. K. Ng, S. Jin, and B. Ai, \"Improving sum-rate of cell-free massive MIMO with expanded compute-and-forward,\" IEEE Trans. Signal Process., pp. 1-1, 2021."
1196
+ },
1197
+ {
1198
+ "type": "ref_text",
1199
+ "bbox": [
1200
+ 0.115,
1201
+ 0.422,
1202
+ 0.882,
1203
+ 0.457
1204
+ ],
1205
+ "angle": 0,
1206
+ "content": "[14] C. Liaskos, S. Nie, A. Tsioliaridou, A. Pitsillides, S. Ioannidis, and I. Akyildiz, “A new wireless communication paradigm through software-controlled metasurfaces,” IEEE Commun. Mag., vol. 56, no. 9, pp. 162–169, Sep. 2018."
1207
+ },
1208
+ {
1209
+ "type": "ref_text",
1210
+ "bbox": [
1211
+ 0.114,
1212
+ 0.461,
1213
+ 0.882,
1214
+ 0.497
1215
+ ],
1216
+ "angle": 0,
1217
+ "content": "[15] X. Liu, Y. Liu, Y. Chen, and H. V. Poor, “RIS enhanced massive non-orthogonal multiple access networks: Deployment and passive beamforming design,” IEEE J. Sel. Areas Commun., vol. 39, no. 4, pp. 1057–1071, Apr. 2020."
1218
+ },
1219
+ {
1220
+ "type": "list",
1221
+ "bbox": [
1222
+ 0.114,
1223
+ 0.084,
1224
+ 0.885,
1225
+ 0.497
1226
+ ],
1227
+ "angle": 0,
1228
+ "content": null
1229
+ }
1230
+ ]
1231
+ ]
2201.11xxx/2201.11302/441c1cbe-30a6-4111-898d-1d3614bc6617_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:221861d7c00f27249e9d44725c79a7ab25e3b684013107b8e0e121bdd71ca8a3
3
+ size 2793672
2201.11xxx/2201.11302/full.md ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Wireless Energy Transfer in RIS-Aided
2
+
3
+ # Cell-Free Massive MIMO Systems:
4
+
5
+ # Opportunities and Challenges
6
+
7
+ Enyu Shi, Jiayi Zhang, Senior Member, IEEE, Shuaifei Chen, Jiakang Zheng, Yan Zhang,
8
+
9
+ Derrick Wing Kwan Ng, Fellow, IEEE, and Bo Ai, Fellow, IEEE
10
+
11
+ # Abstract
12
+
13
+ In future sixth-generation (6G) mobile networks, the Internet-of-Everything (IoE) is expected to provide extremely massive connectivity for small battery-powered devices. Indeed, massive numbers of devices with limited energy storage capacity impose a persistent energy demand, hindering the lifetime of communication networks. As a remedy, wireless energy transfer (WET) is a key technology to address these critical energy supply issues. On the other hand, cell-free (CF) massive multiple-input multiple-output (MIMO) systems offer an efficient network architecture to realize the roll-out of the IoE. In this article, we first propose the paradigm of reconfigurable intelligent surface (RIS)-aided CF massive MIMO systems for WET, including its potential application scenarios and system architecture. The four-stage transmission procedure is discussed and analyzed to illustrate the practicality of the architecture. Then we put forward and analyze the hardware design of the RIS. In particular, we discuss the three corresponding operation modes and the amalgamation of WET technology and RIS-aided CF massive MIMO. Representative simulation results are given to confirm the superior performance achieved by our proposed schemes. Also, we investigate the optimal locations for deploying multiple RISs to achieve the best system performance. Finally, several important research directions for RIS-aided CF massive MIMO systems with WET are presented to inspire further investigation.
14
+
15
+ E. Shi, J. Zhang, S. Chen, J. Zheng, and Y. Zhang are with the School of Electronic and Information Engineering, Beijing Jiaotong University, Beijing 100044, P. R. China. (e-mail: {jiayizhang}@bjtu.edu.cn).
16
+ B. Ai is with the State Key Laboratory of Rail Traffic Control and Safety, Beijing Jiaotong University, Beijing 100044, China. (e-mail: boai@bjtu.edu.cn).
17
+ D. W. K. Ng is with the School of Electrical Engineering and Telecommunications, University of New South Wales, NSW 2052, Australia. (e-mail: w.k.ng@unsw.edu.au).
18
+
19
+ # I. INTRODUCTION
20
+
21
+ Compared with previous generations of networks, the fifth-generation (5G) wireless network has targeted a 1000-fold increase in network capacity, offering ubiquitous wireless connections for at least 100 billion devices worldwide. Recently, with the large-scale commercialization of 5G worldwide, the global industry has begun initial research on the next-generation mobile communication technology, i.e., the sixth-generation (6G). One of the key performance indicators for 6G is its extremely massive connectivity for small devices to enable the so-called Internet-of-Everything (IoE) [1]. In practice, most of these IoE devices will be either battery-powered or battery-less due to the high cost of applying conventional power-grid-based solutions. Unfortunately, the use of limited battery power shortens the lifetime of networks, degrading the quality of service. Although frequent battery replacement offers an intermediate solution to this problem, the large number of devices in the IoE would further lead to exceedingly high labor and material costs. Therefore, advanced energy replenishing solutions are urgently needed to address the energy supply challenges of future networks.
22
+
23
+ Wireless energy transfer (WET) has been proposed to address various practical scenarios where adopting an electrical grid is not possible, such as unmanned aerial vehicle (UAV) communications and wireless sensor networks with sensors embedded in challenging environmental structures or inside a human body [2]. By exploiting the far-field radiation properties of electromagnetic (EM) waves, the radio frequency (RF) energy signal radiated by the transmitter can be harvested at the receiver, which converts it into electrical energy for future use. However, WET technologies face various technical problems such as large path loss attenuation, challenging energy beam alignment, and inefficient resource allocation. Hence, to fully unlock the potential of WET in practical IoE networks, it must be combined with other advanced communication technologies and architectures.
24
+
25
+ The cellular concept was introduced over half a century ago for handling a small number of users in a large area. Recently, cell-free (CF) massive MIMO systems have been proposed [3], which advocate the removal of cellular boundaries to support a massive number of users. In practice, this paradigm can effectively shorten the distance between communication devices and access points (APs) through distributed deployment, thereby improving communication performance. Specifically, all the APs are connected to a central processing unit (CPU) with high-speed fronthaul links. Different from conventional centralized massive MIMO systems,
26
+
27
+ ![](images/a7dc43ae540c062166af605ce19b227126de2b7bf996f24dd9214c3878983126.jpg)
28
+ Fig. 1: Application scenarios of RIS-aided CF massive MIMO systems with WET.
29
+
30
+ APs are deployed over a certain range in a distributed manner and cooperate among themselves, which offers rich spatial diversity to improve the system spectral efficiency (SE). On the other hand, the use of high-frequency bands, e.g., terahertz (THz), is expected in 6G networks to cope with the aggressive demands of massive access [4]. Indeed, considering the path loss in high-frequency bands, super-dense APs have to be deployed to reduce communication distances and to ensure line-of-sight (LoS) links between APs and IoE devices. In general, to support such large-scale multiple access networks, a large amount of energy would be radiated, while the increased interference imposed by super-dense APs has to be carefully managed and controlled. Therefore, it is imperative to study an innovative, spectrally and energy-efficient, yet low-cost 6G wireless network solution.
31
+
32
+ Recently, the reconfigurable intelligent surface (RIS) has been proposed as a promising new technology for reconfiguring the wireless propagation environment through software-controlled signal reflection [5]–[7]. Specifically, a RIS features low power consumption and low cost. Undoubtedly, the RIS can address the shortcomings of the CF architecture in future communications, and these two technologies complement each other. On the other hand, although the RIS was initially proposed as a passive component, the coordination of a large number of elements still requires a certain amount of electrical energy [7]. When there are a large number of RISs, configuring a physical power link for each RIS would cause huge resource consumption. Therefore, WET technology is an excellent solution to replace the grid-based energy supply. Supplying energy to RISs through WET technology is expected to realize passive deployment of RISs, reduce hardware
33
+
34
+ overhead, and improve the system energy efficiency. Despite this great potential, there is relatively little research on RIS-aided CF massive MIMO systems at present. In fact, some works have studied the performance of single-RIS systems or optimized communication problems in multi-RIS systems [8]. The combination of RIS and WET and the corresponding design problems have also been studied through advanced optimization [9]. The results unveiled the non-trivial trade-off between achieving RIS self-sustainability and the system sum-rate. Other works have studied the performance of a single-RIS-aided CF massive MIMO system under idealistic conditions, such as sufficient energy storage and Rayleigh fading channels [10]. Besides, a joint precoding framework for RIS-aided CF networks was introduced in [11]. Nevertheless, there is a lack of thorough research on RIS-aided CF massive MIMO systems and their applications with WET technology.
35
+
36
+ In this article, we try to answer the question: How can WET technology be applied in RIS-aided CF massive MIMO systems? To fully exploit the benefits of the RIS and the WET technology, we design a system architecture applying the WET technology to the RIS and discuss its potential future application scenarios. In addition, we design the transmission procedure of the considered system and analyze each of its stages. Based on this, we design different hardware architectures of the RIS with WET technology for different practical scenarios. Meanwhile, we propose and compare different operation modes of this system, which provide useful insights for the implementation of RIS-aided CF massive MIMO systems. With the novel system architecture, we also discuss how to deploy RISs to achieve better system performance. Finally, we highlight potential research directions that deserve further study.
37
+
38
+ # II. MAIN APPLICATION SCENARIOS AND SYSTEM ARCHITECTURE
39
+
40
+ In this section, we introduce the main application scenarios of RIS-aided CF massive MIMO systems in future wireless networks and analyze the characteristics of different scenarios. Meanwhile, we provide a detailed introduction to the architecture of the considered system and the corresponding transmission procedure.
41
+
42
+ # A. Main Application Scenarios
43
+
44
+ In current research, e.g., [6], [7], the RIS, as a passive device, can be flexibly deployed in communication systems to improve the coverage area and provide wireless services in dead spots. In the future 6G network architecture, the RIS should be combined with other technologies
45
+
46
+ ![](images/5769aeb19d15bd1a24f70f854116f0eca3416e63d07421c57d86885c04b162fb.jpg)
47
+ Fig. 2: System architecture of RIS-aided CF massive MIMO systems with WET.
48
+
49
+ to achieve better communication performance. Our proposed RIS-aided CF massive MIMO communication architecture serves as an excellent candidate for better exploiting the advantages of WET technology and realizing the vision of IoE.
50
+
51
+ Future wireless networks are expected to make full use of low-, medium-, and high-band full-spectrum resources to achieve seamless global coverage of the space-sky-earth trinity, such that they can satisfy the stringent demand for establishing unlimited, safe, and reliable "human-machine-object" connections anytime and anywhere. Indeed, the success of this desired vision relies on the support of the massive access required by the IoE, which demands higher transmission rates, lower delays, and higher reliability [12]. The main scenarios fall into two categories: densely populated spaces and large-scale factories with densely deployed equipment. As shown in Fig. 1, crowded spaces include large indoor shopping malls, basketball courts, restaurants, stadiums, and so on. In contrast, the scenario of a large-scale factory with densely deployed equipment is relatively static, and the main challenge is to facilitate energy harvesting at the equipment. Note that in the factory scenario, the CPU and APs are internal devices rather than externally added ones. The main feature of the former category is that the mobility of personnel is relatively high, whereas the wireless equipment in the latter has only limited mobility. For the former scenarios, there is generally more uncertainty in the wireless communication channels. For example, for the indoor scenario, under the original CF system architecture, we can deploy
52
+
53
+ ![](images/ff669911f60226f4397ed8ecada4f6b96b06fdabe9ccc175dd8001762e985394.jpg)
54
+ Fig. 3: Four-stage transmission procedure of RIS-aided CF massive MIMO systems with WET.
55
+
56
+ multiple RISs in some communication dead zones and improve the quality of communication by increasing the number of RISs. Since WET technology is exploited to supply the necessary energy to the RISs, there is no need to deploy physical power lines for charging, which makes practical implementation more flexible. Moreover, in indoor environments, there is an upsurge in the demand for temporary communications, such as during large-scale events, where the role of flexible RIS deployment is particularly prominent. As for outdoor environments such as gymnasiums, we can deploy a UAV on demand as a carrier of a RIS to offer signal coverage and enhancement through intelligent design of its trajectory. On the other hand, for the latter scenarios, where the equipment is not mobile, we only need to deploy the RISs at fixed locations in advance to improve the system performance.
57
+
58
+ # B. System Architecture
59
+
60
+ As shown in Fig. 2, the system is based on the CF architecture with an "additional" RIS layer between the user equipments (UEs) and the APs. The existing CF massive MIMO system is a three-tier structure composed of the CPU, APs, and UEs. Our proposed system architecture realizes a 3.5-layer architecture, which adds cascaded links through the RISs. When there is a direct path, the APs can receive the signal from the UEs via two uplink paths: the direct link and the aggregated link through the RIS. As such, we treat the channels through the RIS as
61
+
62
+ an additional 0.5 layer. In particular, when the direct path is blocked, this proposed structure can still guarantee stable user communication.
63
+
64
+ The CPU has extremely high computational capability; it can not only receive and transmit a large amount of information but also process the large volume of data received from the APs [13]. In contrast, the APs have limited computing resources and are each equipped with a simple radio frequency antenna. In the considered system, the difference between the proposed RIS and the traditional one is that ours equips the RIS with a wireless energy harvesting module, which replaces the original wired-power circuit. By adopting the WET technology, the AP is considered an energy source that charges the RIS module wirelessly to ensure the normal operation of the RIS.
65
+
66
+ To realize this paradigm, we introduce the flow of the four-stage transmission procedure of the system in Fig. 3. Stage I: Downlink energy transmission: The CPU collects the information transmitted by the APs via the fronthaul and sends control commands to the APs after signal processing. Based on the received signal, each AP decides whether to transmit wireless energy signals to the RIS for energy harvesting. In particular, when the stored energy level in the RIS exceeds a threshold value, the RIS controller transmits a feedback signal to the AP to terminate the energy transmission. Stage II: Data transmission: In the uplink, each UE first sends a pilot signal, which reaches the APs directly or through the RIS cascaded channel. Then the APs receive the pilot signal and convey it to the CPU via dedicated fronthaul links for channel estimation. Subsequently, the UEs send their uplink data, and the APs receive the signal via the direct path as well as the reflected signal arriving through RIS beamforming. Besides, the APs send their received data to the CPU, which performs joint signal processing for data detection. Finally, the CPU obtains the uplink decoded signal based on the global channel estimates. Then the downlink data signal is generated by the downlink precoder and transmitted to the APs, finally reaching the UEs through the RIS. Meanwhile, the APs generate the signals that control the RIS for phase adaptation. Note that if the AP is to realize dynamic control of the RIS, it is necessary to modify the frame structure and insert some control time slots. Correspondingly, signal processing modules may need to be deployed at the RIS to respond to the control signals.
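+
+ As a concrete illustration of the Stage I feedback described above, the following minimal sketch simulates the threshold-based charging loop at the RIS controller. The conversion efficiency, per-slot harvested power, reflection drain, and battery threshold are hypothetical values chosen for illustration, not parameters from this article.
+
+ ```python
+ # Minimal sketch of the Stage I energy feedback loop (illustrative values only).
+ # The RIS harvests AP energy until its battery exceeds a threshold, then asks
+ # the AP to stop; reflection in later stages drains the battery again.
+
+ ETA = 0.6           # assumed RF-to-DC conversion efficiency
+ P_HARVEST = 2e-3    # assumed RF power captured per slot at the RIS (W)
+ P_REFLECT = 0.5e-3  # assumed control-circuit drain per slot while reflecting (W)
+ THRESHOLD = 10e-3   # assumed battery level that triggers the stop feedback (J)
+
+ def run_slots(num_slots: int) -> None:
+     battery = 0.0
+     charging = True  # Stage I active
+     for t in range(num_slots):
+         if charging:
+             battery += ETA * P_HARVEST  # downlink WET from the AP
+             if battery >= THRESHOLD:
+                 charging = False        # feedback: terminate energy transmission
+                 print(f"slot {t}: threshold reached, RIS signals the AP to stop")
+         else:
+             battery -= P_REFLECT        # energy spent on reflection/control
+             if battery < 0.2 * THRESHOLD:
+                 charging = True         # request WET again
+                 print(f"slot {t}: battery low, RIS requests recharging")
+
+ run_slots(40)
+ ```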
67
+
68
+ # III. DEPLOYMENT DESIGN
69
+
70
+ In this section, we propose a RIS hardware design scheme supported by the WET technology. On this basis, we explore different system operation modes and compare their advantages and
71
+
72
+ disadvantages. Finally, we discuss how to effectively determine the locations of RIS deployments in various practical scenarios.
73
+
74
+ # A. Hardware Design
75
+
76
+ The hardware implementation of the RIS is based on the concept of the "metasurface", each element of which is a programmable sub-wavelength structural unit composed of two-dimensional metamaterials [14]. In practice, a field-programmable gate array (FPGA) can be used as a controller to achieve flexible control of the RIS, which usually communicates and coordinates with other network components (e.g., BSs, APs, and UEs) through dedicated links. Although the FPGA consumes only a small amount of energy, it still needs an electrical power source to support its operation. As shown in Fig. 4, we introduce a wireless energy scavenging module into the original RIS panel and exploit some elements for energy reception and the other elements for signal reflection. The energy harvesting elements are connected to a piece of energy storage hardware (e.g., a rechargeable battery), which can store the harvested energy and support the energy consumption of the other elements performing reflection.
77
+
78
+ A RIS is generally assembled from hundreds of elements [6], so it is worth exploring which elements should be selected to serve as energy receiving modules. Here, we design and compare three types of hardware structures in Fig. 4. The first type consists of a complete separation of the energy harvesting elements and the information reflecting elements, which is easy to implement in hardware. Yet, when the RIS panel is large, there would be some energy reception dead spots in this design due to the non-uniform energy flux created by impinging signals. Based on this, we further propose an improved block structure, i.e., deploying energy harvesting elements at the four corners of the RIS (type 2), which can alleviate the impact of dead spots but introduces moderate hardware implementation difficulty. Finally, we also design a scattered structure that aims at reducing the impact of dead zones for energy harvesting or signal reflection, but at the cost of higher hardware complexity. In practice, the energy consumption of the centralized and distributed element designs mainly depends on the number of elements used for information reflection. Indeed, in addition to the balance between system performance and implementation complexity, the ratio between the number of RIS elements in energy harvesting mode and that in reflecting mode is another key issue. It is determined by various factors, such as distance, energy conversion efficiency, and the channel environment.
79
+
80
+ In practice, the ratio between information and energy elements can be adaptively adjusted according to feedback on the actual parameters to realize dynamic assignment [9].
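+
+ As a rough feasibility check on this ratio, the back-of-the-envelope sketch below estimates the minimum number of elements that must operate in energy harvesting mode so that the harvested power covers the control power of the remaining reflecting elements. All parameter values are hypothetical placeholders, not measurements from this article.
+
+ ```python
+ import math
+
+ # Back-of-the-envelope sizing of the harvesting/reflecting element split.
+ N_TOTAL = 256  # total number of RIS elements
+ P_INC = 50e-6  # assumed incident RF power captured per element (W)
+ ETA = 0.5      # assumed RF-to-DC conversion efficiency
+ P_CTRL = 8e-6  # assumed control power consumed per reflecting element (W)
+
+ # Self-sustainability condition: ETA * P_INC * n_eh >= P_CTRL * (N_TOTAL - n_eh)
+ n_eh = math.ceil(P_CTRL * N_TOTAL / (ETA * P_INC + P_CTRL))
+ print(f"harvesting elements: {n_eh} / {N_TOTAL} ({n_eh / N_TOTAL:.1%} of the surface)")
+ ```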
81
+
82
+ ![](images/9021a1480b99404764ff42cd140d56dec3e29ced42efef799c0df0e5aeed7ef0.jpg)
83
+ Fig. 4: Hardware designs for information and energy transmission in a RIS. Type 1: Energy harvesting and information reflecting elements are completely separated and distributed in blocks. Type 2: Block structure with incomplete separation of energy harvesting and information reflection elements. Type 3: Energy harvesting and information reflecting elements alternate with each other in a scattered distribution.
84
+
85
+ ![](images/8a7507b0433af4224e4644d9e3959354d7b674144a547c4bc17d999cb37e8b1c.jpg)
86
+
87
+ ![](images/44d1e634b3a52519b3d67fd6b6f4484e9385e8369f58a4ac4a077bfd580beb7b.jpg)
88
+
89
+ # B. Operation Modes
90
+
91
+ In practice, there are different ways of deploying RISs, leading to different system operation modes. They affect the hardware design complexity and signal processing methods, as well as the synergy between individual communication devices, which inevitably results in different system performance. In this section, we discuss several different operation modes of the RIS for WET and information reflection.
92
+
93
+ 1) Centralized RIS: As shown in Fig. 6, we first consider that there is only a single large-scale centralized RIS with massive elements. It is configured with high computational power and signal processing capability at the RIS controller, which can reduce the processing burden of the APs. Meanwhile, the centralized RIS has a high beamforming capability that can assist the APs in serving massive numbers of UEs via excellent interference management [9]. Moreover, the centralized design facilitates highly efficient wireless energy focusing, and it is easier to deploy and maintain the equipment. However, due to the architecture of the centralized RIS, the path loss for long-distance devices can be severe since UEs are randomly located. Also, the ability of WET to keep such a sophisticated controller functioning properly still needs to be explored. Besides, the requirements on the hardware design are demanding, resulting in high cost.
94
+
95
+ 2) Non-cooperative Distributed RIS: In this case, we assume that multiple RISs are non-cooperatively deployed in a service area. This distributed deployment is more flexible and can avoid dead spots in WET as much as possible. Meanwhile, the RIS-AP and RIS-UE distances are shortened, reducing the path loss of WET. As such, significant improvements in energy efficiency and communication quality can be achieved. In practice, the hardware design of each RIS is still relatively simple since it only needs to implement simple signal reflection. However, since multiple non-cooperative RISs are distributed in the service area, the signals between different RISs may interfere with each other, which degrades the communication performance of the UEs.
96
+
97
+ 3) Cooperative Distributed RIS: Similar to the last operation mode, we consider multiple RISs in a service area. The difference is that there is intelligent cooperation among the RISs through physical links or dedicated wireless communication, as shown in Fig. 6. Note that when there is a physical link connected to the RIS, WET is regarded as supplementary electric energy to reduce the required energy consumption. This operation mode not only reduces the path loss of WET and information communication but also enables better beam management among multiple RISs through cooperation, thus reducing interference. However, it also places additional requirements on the hardware design of the RISs, such as how to implement physical connections between multiple RISs and how to design the controller. As such, the RIS incurs additional energy consumption for channel estimation and cooperation. However, due to the stable characteristics of the channel, frequent communication between RISs is not required, so the required energy consumption is still reasonable. In practice, high computational capability at the CPU and advanced optimization algorithms are needed to achieve efficient and intelligent cooperation of the RISs. Overall, the centralized RIS is more suitable for small-scale networks with dense users. In contrast, the distributed RIS performs better in large-scale networks due to its inherent ability to exploit spatial diversity.
98
+
99
+ In Fig. 5, we compare different RIS operation modes for different element numbers within an area of $1000\mathrm{m}^2$ and consider the effect of phase errors. The achievable rate (bps/Hz) and the transmit power (dBm) are the two main performance metrics for information communication. The results show that the RIS can significantly improve the system performance and that, compared with the centralized RIS, distributed deployment obtains a higher achievable rate. Moreover, phase errors reduce the performance of the system, and their influence becomes more significant as the number of RIS elements increases.
100
+
101
+ ![](images/52618527df25fc75cfd7c2265d38666cda59f368ece562e7c3bf21bec17a3526.jpg)
102
+ Fig. 5: Achievable rate of different RIS operation modes and elements with phase errors.
103
+
104
+ ![](images/4b1b9c86b45b08aba41cf7f38830e7efa6bcd40fd039b72da9b30af6e694a057.jpg)
105
+ Fig. 6: Comparison of different RIS operation modes.
106
+
107
+ # C. RIS Deployment
108
+
109
+ How to judiciously deploy multiple RISs in a hybrid wireless network containing passive RISs, active CPUs, and APs to optimize the system performance is a critical problem that needs to be solved. Previous studies have provided some solutions for RIS location deployment, such as the observation that deploying a RIS close to either the UE or the AP side yields higher system performance [15]. Unfortunately, the considered system is not a single point-to-point communication link, and the RIS needs support from the WET technology. In the considered network, energy and information transmission are two equally important performance metrics.
110
+
111
+ First, from the perspective of optimizing the system communication performance, the RIS should intuitively be deployed in a location with a clear LoS from the AP to maximize its received signal power for passive beamforming and WET. Although this deployment scheme enjoys good performance in the case of a single UE, it does not work well when the number of UEs increases, due to the lack of enough non-LoS paths to achieve high-rank MIMO channels. To enhance the experience of UEs with poor communication quality, we should also deploy
112
+
113
+ some RISs in dedicated areas with weak signal coverage and communication dead zones caused by blockages. On the other hand, from the perspective of WET, the RIS should be deployed in scenarios where it is difficult to install power lines, such as in emergencies, UAV communications, and temporary deployment sites. In practice, ensuring proper system operation first requires that the RIS can effectively receive wireless energy signals to support its operation. As such, a RIS should be deployed reasonably close to an AP to facilitate the establishment of a LoS path.
114
+
115
+ In practice, the dynamics of the propagation environment and user mobility lead to channel variations over time, and each RIS may be associated with multiple APs and UEs while multiple RISs may be interconnected. In such cases, heuristic solutions to the design of RIS deployment may be ineffective. In particular, it is generally intractable to acquire accurate global channel state information (CSI) at a low cost in large-scale systems. Therefore, how to determine a precise location deployment scheme for multiple RISs based on partial system information is a new problem of high practical significance. A promising approach to solve this problem is to utilize machine learning techniques, such as deep learning (DL). For example, in the training phase, we can empirically deploy multiple RISs at reasonable reference locations and collect key performance metrics, such as the received signal strength and the corresponding energy efficiency measured at different UE locations. The collected key metrics and the locations of the RISs are then exploited to train a DL-based neural network, serving as its input and output, respectively. However, during the training process, it is generally not possible to change the locations of RISs, and additional installation costs will be incurred. In fact, the obtained results serve as a performance upper bound for a fixed RIS deployment location. For a more practical implementation, one may consider statistical long-term optimization. Besides, we can adopt convex optimization methods, such as the gradient descent and Newton methods, to optimize the RIS locations and the system power allocation to improve the system performance.
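+
+ To make the training phase described above concrete, the toy sketch below (with synthetic data standing in for field measurements) fits a small neural network that maps measured key metrics to the RIS location that produced them. The metric model, network size, and all numerical values are illustrative assumptions.
+
+ ```python
+ import numpy as np
+ from sklearn.neural_network import MLPRegressor
+
+ rng = np.random.default_rng(0)
+
+ # Synthetic stand-in for the data collection phase: key metrics measured at
+ # reference RIS locations (input) mapped to the 2-D location that produced
+ # them (output). Real data would come from field measurements.
+ num_samples = 500
+ ris_locations = rng.uniform(0.0, 100.0, size=(num_samples, 2))  # (x, y) in m
+ # Hypothetical metrics: received signal strength (dBm) and energy efficiency,
+ # generated here by an arbitrary toy function of the location.
+ rss = -60.0 - 0.3 * np.linalg.norm(ris_locations - 50.0, axis=1)
+ ee = 1.0 / (1.0 + 0.01 * np.linalg.norm(ris_locations, axis=1))
+ metrics = np.column_stack([rss, ee])
+
+ model = MLPRegressor(hidden_layer_sizes=(64, 64), max_iter=3000, random_state=0)
+ model.fit(metrics, ris_locations)
+
+ # Given target metrics desired at the UE side, predict a candidate RIS location.
+ target = np.array([[-65.0, 0.6]])
+ print("suggested RIS location:", model.predict(target))
+ ```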
116
+
117
+ # IV. FUTURE DIRECTIONS
118
+
119
+ In addition to the system architecture and deployment schemes discussed above, there are still other research directions for the considered systems that are worthy of future investigation.
120
+
121
+ # A. Control Scheme
122
+
123
+ For a single RIS, how to control the activation of the energy harvesting and information reflection elements is an important problem that involves the following aspects. For instance, stringent
124
+
125
+ requirements are imposed on the hardware design of the RIS, while higher information receiving and processing capabilities are also required. Besides, the massive number of elements in a RIS poses new challenges to the optimization of high-dimensional matrices [9]. As for the considered systems, the traditional analysis results in the literature do not consider the cooperation among RISs, which may lead to UE unfairness and communication quality degradation due to signal interference among multiple RISs. To fully utilize the strong directivity of the RIS reflected beam, the RIS phase shift matrix should be designed according to the AP and UE positions to achieve precise control of the beam direction, thereby avoiding interference as much as possible. For example, with a large number of APs and RISs, computationally efficient distributed machine learning algorithms serve as appealing solutions, as they have been successfully applied to large-scale optimization problems. However, a rigorous control protocol must be designed to ensure orderly communication and harness the interference among devices.
126
+
127
+ # B. Resource Allocation
128
+
129
+ Large and complex systems inevitably face the problem of resource allocation. In traditional CF systems, pilot assignment is an important research direction that is also inherent to our system. Besides, to support the WET technology, power allocation is another key issue worthy of attention. Specifically, we need to control the energy supply offered by the APs to optimize the system energy efficiency. If all the APs adopt a uniform power allocation to supply energy to the RISs, the result is generally suboptimal and can cause energy waste. As such, certain optimization mechanisms are needed to maximize power utilization. For instance, under imperfect CSI, we can exploit the large-scale channel information and solve the related resource allocation problem through optimization algorithms such as geometric programming. In practice, formulating design optimization problems to realize accurate and efficient power allocation is an ideal solution. However, most of the practical problems are non-convex and intractable in complex networks. Hence, finding an optimal power control scheme within a reasonable timescale is an urgent future research direction. It is worth mentioning that using the WET technology to supply energy to UEs should also be considered in resource allocation.
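+
+ As a minimal illustration of geometric-programming-based power allocation with only large-scale channel gains, the sketch below minimizes the total transmit power subject to per-UE SINR targets, a classical GP formulation. The gains, noise power, and SINR target are hypothetical, and harvested-energy constraints would require a separate GP-compatible reformulation.
+
+ ```python
+ import cvxpy as cp
+ import numpy as np
+
+ rng = np.random.default_rng(1)
+
+ # Toy GP power control using only large-scale gains.
+ # G[k, j]: assumed large-scale gain from the stream intended for UE j to UE k.
+ K = 4
+ G = rng.uniform(0.1, 1.0, size=(K, K)) + np.eye(K) * 5.0  # strong direct links
+ sigma2 = 0.1     # noise power
+ gamma_min = 1.5  # per-UE SINR target
+
+ p = cp.Variable(K, pos=True)  # transmit powers
+ constraints = []
+ for k in range(K):
+     others = np.array([j for j in range(K) if j != k])
+     interference = cp.sum(cp.multiply(G[k, others], p[others])) + sigma2
+     # SINR_k >= gamma_min rewritten as a posynomial <= 1 (GP-compatible)
+     constraints.append(gamma_min * interference / (G[k, k] * p[k]) <= 1)
+
+ problem = cp.Problem(cp.Minimize(cp.sum(p)), constraints)
+ problem.solve(gp=True)
+ print("optimal powers:", np.round(p.value, 4))
+ ```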
130
+
131
+ # C. Hardware
132
+
133
+ To achieve ubiquitous deployment of RIS-aided CF massive MIMO systems, the cost and quality of the hardware for receivers and transmitters, as well as the RIS, naturally strike a non-trivial
134
+
135
+ trade-off in system design. Yet, to overcome the associated high cost, we typically utilize low-cost components at the APs. Unfortunately, the use of low-cost hardware at the RIS affects the hardware accuracy, including the limited phase shift resolution and the mutual coupling of the phase and incidence angles in each RIS element. In practice, the non-linear amplitude and phase response of the RIS would affect the accuracy of the reflected signals, resulting in unsatisfactory system performance. As a result, a pragmatic system design taking into account potential hardware imperfections is necessary. Besides, the hardware design for energy reception introduces extra complexity in fabricating the RIS. Indeed, designing WET elements at the wavelength level is challenging. In addition, how to avoid the coupling interference between the WET elements and the reflecting elements is also a problem that needs to be considered. On the other hand, the fronthaul capacity limitation reduces the availability of conveying control signals between the APs and the CPU. As such, the above hardware impairment problems would definitely degrade the communication and energy transmission performance of the system and are thus important issues for investigation.
136
+
137
+ # V. CONCLUSIONS
138
+
139
+ In this article, we investigated the promising RIS-aided CF massive MIMO system with WET technology for realizing the IoE in future wireless networks. First, we discussed several potential application scenarios of the system and proposed the system architecture. Besides, we proposed different operation modes and shed light on suitable RIS hardware designs for WET. In addition, we investigated feasible solutions for the possible deployment of RISs in the system. Finally, to offer useful guidance for future research, we indicated the critical challenges and promising research directions for realizing RIS-aided CF massive MIMO systems with WET.
140
+
141
+ # REFERENCES
142
+
143
+ [1] H. Tataria, M. Shafi, A. F. Molisch, M. Dohler, H. Sjoland, and F. Tufvesson, “6G wireless systems: Vision, requirements, challenges, insights, and opportunities,” Proc. IEEE, vol. 109, no. 7, pp. 1166–1199, Jul. 2021.
144
+ [2] J. Hu, Q. Wang, and K. Yang, “Energy self-sustainability in full-spectrum 6G,” IEEE Wireless Commun., vol. 28, no. 1, pp. 104–111, Feb. 2021.
145
+ [3] J. Zhang, E. Björnson, M. Matthaiou, D. W. K. Ng, H. Yang, and D. J. Love, “Prospective multiple antenna technologies for beyond 5G,” IEEE J. Sel. Areas Commun., vol. 38, no. 8, pp. 1637–1660, Aug. 2020.
146
+ [4] S. Chen, J. Zhang, E. Björnson, J. Zhang, and B. Ai, “Structured massive access for scalable cell-free massive MIMO systems,” IEEE J. Sel. Areas Commun., vol. 39, no. 4, pp. 1086–1100, Apr. 2021.
147
+ [5] W. Tang, M. Z. Chen, J. Y. Dai, Y. Zeng, X. Zhao, S. Jin, Q. Cheng, and T. J. Cui, “Wireless communications with programmable metasurface: New paradigms, opportunities, and challenges on transceiver design,” IEEE Wireless Commun., vol. 27, no. 2, pp. 180–187, Feb. 2020.
148
+
149
+ [6] Q. Wu and R. Zhang, “Towards smart and reconfigurable environment: Intelligent reflecting surface aided wireless network,” IEEE Commun. Mag., vol. 58, no. 1, pp. 106–112, Jan. 2020.
150
+ [7] M. Di Renzo, A. Zappone, M. Debbah, M.-S. Alouini, C. Yuen, J. De Rosny, and S. Tretyakov, “Smart radio environments empowered by reconfigurable intelligent surfaces: How it works, state of research, and the road ahead,” IEEE J. Sel. Areas Commun., vol. 38, no. 11, pp. 2450–2525, Nov. 2020.
151
+ [8] Q. Wu and R. Zhang, “Joint active and passive beamforming optimization for intelligent reflecting surface assisted SWIPT under QoS constraints,” IEEE J. Sel. Areas Commun., vol. 38, no. 8, pp. 1735–1748, Aug. 2020.
152
+ [9] S. Hu, Z. Wei, Y. Cai, C. Liu, D. W. K. Ng, and J. Yuan, “Robust and secure sum-rate maximization for multiuser MISO downlink systems with self-sustainable IRS,” IEEE Trans. Commun., early access, 2021.
153
+ [10] T. Van Chien, H. Q. Ngo, S. Chatzinotas, M. Di Renzo, and B. Ottersten, “Reconfigurable intelligent surface-assisted cell-free massive MIMO systems over spatially-correlated channels,” arXiv preprint arXiv:2104.08648, 2021.
154
+ [11] Z. Zhang and L. Dai, “A joint precoding framework for wideband reconfigurable intelligent surface-aided cell-free network,” IEEE Trans. Signal Process., vol. 69, pp. 4085–4101, 2021.
155
+ [12] M. Giordani, M. Polese, M. Mezzavilla, S. Rangan, and M. Zorzi, “Toward 6G networks: Use cases and technologies,” IEEE Commun. Mag., vol. 58, no. 3, pp. 55–61, Mar. 2020.
156
+ [13] J. Zhang, J. Zhang, D. W. K. Ng, S. Jin, and B. Ai, “Improving sum-rate of cell-free massive MIMO with expanded compute-and-forward,” IEEE Trans. Signal Process., pp. 1–1, 2021.
157
+ [14] C. Liaskos, S. Nie, A. Tsioliaridou, A. Pitsillides, S. Ioannidis, and I. Akyildiz, “A new wireless communication paradigm through software-controlled metasurfaces,” IEEE Commun. Mag., vol. 56, no. 9, pp. 162–169, Sep. 2018.
158
+ [15] X. Liu, Y. Liu, Y. Chen, and H. V. Poor, “RIS enhanced massive non-orthogonal multiple access networks: Deployment and passive beamforming design,” IEEE J. Sel. Areas Commun., vol. 39, no. 4, pp. 1057–1071, Apr. 2021.
2201.11xxx/2201.11302/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2438d7853d46514472b0680bbf1d20f5b7a0c99fe0cdad55731a327498fe35ea
3
+ size 326930
2201.11xxx/2201.11302/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11332/d19f5003-2cc0-48d3-9b91-4ca890e1790e_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11332/d19f5003-2cc0-48d3-9b91-4ca890e1790e_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11332/d19f5003-2cc0-48d3-9b91-4ca890e1790e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:337ac8894f0ac3db7c754c8d8a7798c999a99daac375e9cfe792cd02489bfddb
3
+ size 2557102
2201.11xxx/2201.11332/full.md ADDED
@@ -0,0 +1,392 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Ontology-enhanced Prompt-tuning for Few-shot Learning
2
+
3
+ Hongbin Ye, Ningyu Zhang*
4
+
5
+ Zhejiang University
6
+
7
+ AZFT Joint Lab for Knowledge Engine
8
+
9
+ Hangzhou Innovation Center
10
+
11
+ Hangzhou, China
12
+
13
+ {yehongbin,zhangningyu}@zju.edu.cn
14
+
15
+ Shumin Deng
16
+
17
+ Zhejiang University
18
+
19
+ AZFT Joint Lab for Knowledge Engine
20
+
21
+ Hangzhou Innovation Center
22
+
23
+ Hangzhou, China
24
+
25
+ 231sm@zju.edu.cn
26
+
27
+ Xiang Chen
28
+
29
+ Zhejiang University
30
+
31
+ AZFT Joint Lab for Knowledge Engine
32
+
33
+ Hangzhou Innovation Center
34
+
35
+ Hangzhou, China
36
+
37
+ xiang_chen@zju.edu.cn
38
+
39
+ Hui Chen, Feiyu Xiong
40
+
41
+ Alibaba Group
42
+
43
+ Hangzhou, China
44
+
45
+ {weidu.ch,feiyu.xfy}@alibaba-inc.com
46
+
47
+
48
+
49
+ Xi Chen
50
+
51
+ Tencent
52
+
53
+ ShenZhen, China
54
+
55
+ jasonxchen@tencent.com
56
+
57
+ Huajun Chen*
58
+
59
+ Zhejiang University & AZFT Joint
60
+
61
+ Lab for Knowledge Engine &
62
+
63
+ Hangzhou Innovation Center
64
+
65
+ Hangzhou, China
66
+
67
+ huajunsir@zju.edu.cn
68
+
69
+ # ABSTRACT
70
+
71
+ Few-shot Learning (FSL) aims to make predictions based on a limited number of samples. Structured data such as knowledge graphs and ontology libraries have been leveraged to benefit the few-shot setting in various tasks. However, the priors adopted by existing methods suffer from the challenges of knowledge missing, knowledge noise, and knowledge heterogeneity, which hinder performance in few-shot learning. In this study, we explore knowledge injection for FSL with pre-trained language models and propose ontology-enhanced prompt-tuning (OntoPrompt). Specifically, we develop an ontology transformation based on the external knowledge graph to address the knowledge missing issue, which enriches and converts structured knowledge into text. We further introduce span-sensitive knowledge injection via a visible matrix to select informative knowledge and handle the knowledge noise issue. To bridge the gap between knowledge and text, we propose a collective training algorithm to jointly optimize representations. We evaluate our proposed OntoPrompt on three tasks, including relation extraction, event extraction, and knowledge graph completion, with eight datasets. Experimental results demonstrate that our approach can obtain better few-shot performance than baselines.
72
+
73
+ # CCS CONCEPTS
74
+
75
+ - Information systems $\rightarrow$ Information extraction.
76
+
77
+ # KEYWORDS
78
+
79
+ Few-shot Learning, Ontology, Prompt-tuning, Relation Extraction, Event Extraction, Knowledge Graph Completion
80
+
81
+ # ACM Reference Format:
82
+
83
+ Hongbin Ye, Ningyu Zhang, Shumin Deng, Xiang Chen, Hui Chen, Feiyu Xiong, Xi Chen, and Huajun Chen. 2022. Ontology-enhanced Prompt-tuning for Few-shot Learning. In Proceedings of the ACM Web Conference 2022 (WWW '22), April 25-29, 2022, Virtual Event, Lyon, France. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/3485447.3511921
84
+
85
+ # 1 INTRODUCTION
86
+
87
+ Recent years have witnessed the success of deep neural networks; however, open issues remain, as they still rely heavily on labeled data in practice and face non-negligible challenges owing to variations in domains, languages, and tasks. These drawbacks motivate the research on an important technique, few-shot learning (FSL), which can significantly improve the learning capabilities of machine intelligence and practical adaptive applications by accessing only a small number of labeled examples. Over the past few years, FSL has been introduced into a wide range of machine learning tasks, such as relation extraction [6, 32, 62, 67, 68], event extraction [10, 47], and knowledge graph completion [77].
88
+
89
+ To address the few-shot issue, on the one hand, researchers apply meta-learning strategies to endow a new model with the ability to optimize rapidly using existing training knowledge, or leverage transfer learning to alleviate the challenge of data scarcity [4, 66]. Benefiting from self-supervised pre-training on large corpora, the pre-train and fine-tune paradigm has become the de facto standard for natural language processing (NLP), performing well on downstream tasks with a small amount of task-specific data. On the other hand, structured data such as knowledge systems and ontology libraries can be modeled under the few-shot setting. Note that such prior knowledge is mostly an abstract summary of human experience and can provide vital support for FSL in various domains [12, 23, 70, 75]. In this paper, we focus on injecting knowledge for few-shot learning, where several nontrivial challenges remain, as follows:
90
+
91
+ - Knowledge Missing. Knowledge injection may not be able to retrieve task-relevant facts due to the incompleteness of the external knowledge base and may thus provide no useful, or even irrelevant, information to downstream tasks. How to enrich task-relevant knowledge for tasks is an important issue.
92
+
93
- Knowledge Noise. Previous studies [34, 44, 65] have demonstrated that not all knowledge is beneficial for downstream tasks, and an indiscriminate injection of knowledge may lead to negative knowledge infusion, which is detrimental to the performance of downstream tasks. Thus, context-sensitive and task-specific knowledge selection is critical for knowledge-enhanced learning.
- Knowledge Heterogeneity. The language corpus of downstream tasks is quite different from the injected knowledge, leading to two separate vector representations [78]. How to design a joint training objective that fuses knowledge information is another challenge.

In this study, to address the above-mentioned issues, we explore knowledge injection for FSL with pre-trained language models (PLMs) and propose ontology-enhanced prompt-tuning (OntoPrompt). Note that pre-trained language models can extract plenty of statistical information from large amounts of data, while external knowledge, such as knowledge graphs, is the outcome of human wisdom and can serve as a good prior for such statistical modeling.

First, we propose ontology transformation to enrich and convert structured knowledge into text format. Specifically, we utilize pre-defined templates to convert knowledge to text as prompts. Prompt-tuning can reduce the gap between pre-training tasks and downstream tasks. For example, given a sentence s, "Turing entered King's College, Cambridge in 1931, and then went to Princeton University to study for a doctorate," we can pack it into a knowledgeable prompt based on the ontology: "s. Turing [MASK] King's College, Cambridge." The PLM should predict label tokens at the masked position to determine the label of the input. Note that ontology as prompt places knowledge of entities/spans into the input text, which is model-agnostic and can be plugged into any type of pre-trained language model.

Second, we propose span-sensitive knowledge injection to select informative knowledge and mitigate injected noise. Since not all external knowledge is advantageous for the downstream task, and some irrelevant and noisy knowledge may change the meaning of the original sentence, we leverage a visible matrix based on spans and their corresponding external knowledge to guide the knowledge injection. In this way, not all tokens in the input sentences will be influenced by external knowledge.

Third, we propose a collective training algorithm to jointly optimize representations. Note that the injected external knowledge should be associated with the surrounding context; we add some learnable tokens with random initialization and optimize those tokens as well as the injected ontology tokens with the language model fixed. Inspired by a previous study [19] showing that prompt-tuning in the low-data regime is unstable and may obtain poor performance, we further optimize all parameters to collectively train the ontology text and input text representations.

Finally, we evaluate our OntoPrompt with three tasks: relation extraction, event extraction, and knowledge graph completion. We leverage dataset-related ontologies as external knowledge and conduct experiments on eight datasets in the few-shot setting. Experimental results illustrate that our proposed approach can obtain better performance. It should be noted that our approach is model-agnostic, and therefore orthogonal to existing pre-trained language models.

# 2 RELATED WORK

# 2.1 Knowledge-enhanced Learning

Pre-training then fine-tuning has become the default paradigm for natural language processing. However, the performance of knowledge-driven downstream tasks (for example, question answering or relation extraction) depends on structured relational knowledge; thus, directly fine-tuning pre-trained LMs yields suboptimal results. External knowledge graphs have therefore been considered an indispensable part of language understanding [73], which has inspired knowledge-aware models such as ERNIE [79]. To integrate ontology knowledge, [24] propose to tackle the zero-shot event detection problem by mapping each event mention to a specific type in a target event ontology. [10] propose an event detection framework based on ontology embedding with event correlations, which interoperates symbolic rules with popular deep neural networks. [18] propose a novel ZSL framework called OntoZSL, which not only enhances the class semantics with an ontological schema but also employs an ontology-based generative model to synthesize training samples for unseen classes. [53] propose an ontology-guided entity alignment method named OntoEA, where both knowledge graphs and their ontologies are jointly embedded, and the class hierarchy and class disjointness are utilized to avoid false mappings.

However, the dilemmas of knowledge missing, knowledge noise, and knowledge heterogeneity have not been fully addressed. Concretely, [34] propose K-BERT, which utilizes soft positions and a visible matrix to limit the impact of knowledge. [44] propose CokeBERT, which can dynamically select and embed knowledge context according to textual context for PLMs, avoiding the effect of redundant and ambiguous knowledge in knowledge graphs that cannot match the input text. [2] propose knowledge-to-text transformation to benchmark commonsense question answering.

Different from their approaches, we integrate ontology knowledge into pre-trained language model fine-tuning with prompts. We propose a novel ontology transformation to enrich the missing knowledge and utilize span-sensitive knowledge injection to mitigate noisy knowledge. We further optimize those heterogeneous representations with collective training.

# 2.2 Few-shot Learning

Few-shot learning aims to improve the learning capabilities of machine intelligence and practical adaptive applications with only a small number of training instances [66]. Our proposed approach is complementary to other few-shot methods, including: (1) intermediate training [39, 60], which supplements the pre-trained LMs with further training on data-rich supervised tasks; (2) meta-learning [9, 11, 62], in which quantities of auxiliary tasks are optimized; and (3) semi-supervised learning [54], which leverages unlabeled samples.

2.2.1 Relation Extraction. Relation extraction aims to identify the relation between entity pairs based on a given contextual text [28, 64].

![](images/be4ba3ca4f041b51d409ed2667f97d17f5400517b8756690dbd00fef5c7bf3c9.jpg)
Figure 1: Ontology knowledge injection for relation extraction (RE), event extraction (EE) and knowledge graph completion (KGC) (best viewed in color).

In order to reduce the cost of labeling, previous studies utilize distant supervision based on the knowledge graph to generate labeled examples automatically [69, 74]. More recent few-shot relation extraction approaches leverage prototype networks [8], multi-level matching and aggregation, relational twin networks, and meta-learning [15].

2.2.2 Event Extraction. Event extraction aims to identify event triggers with arguments from text, which is generally formulated as a classification or structured prediction problem [37]. To address the few-shot issue for event extraction, [33] utilize external knowledge from FrameNet and propose a global inference approach to improve event detection performance. More recently, [31, 51, 58] reformulate the event extraction task as machine reading comprehension (MRC), which can achieve better performance in the few-shot setting than vanilla fine-tuning. Although MRC has the same wide range of application scenarios as our method, we automatically construct templates through ontology knowledge.

2.2.3 Knowledge Graph Completion. Knowledge graph completion can be achieved by link prediction over the knowledge graph [72, 76]. Previous researchers have proposed various link prediction methods that encode entities and relations into a low-dimensional continuous space, such as TransE [3], TransR [30], TransD [26], TransH [52], etc. DistMult [56] simplifies the RESCAL approach, while ComplEx [48] extends DistMult to the complex domain. RotatE [45] defines each relation as a transformation and rotation from the source entity to the target entity in complex space. KG-BERT [57] takes the descriptions of the entities and relations in a triple as input and leverages BERT to calculate the score of the triple. For few-shot knowledge graph completion, many approaches [42, 63, 66] have been proposed. [66] propose a general framework called Weighted Relation Adversarial Network (RAN), which utilizes an adversarial procedure to help adapt knowledge/features learned from high-resource relations to different but related low-resource relations. [77] propose generalized relation learning (GRL), which utilizes semantic correlations between relations as a bridge to connect semantically similar relations.

# 2.3 Prompt-tuning

A new fine-tuning methodology named prompt-tuning has arisen: adapting the pre-trained language model directly as a predictor through the completion of a cloze task. Prompt-tuning for pre-trained language models is a rapidly emerging field in natural language processing [40, 46, 71] and has attracted much attention. Originating from GPT-3, prompt-tuning has been applied to a variety of tasks, including relation extraction [20], event extraction [21, 59], named entity recognition [5, 7], entity typing [13], and so on. To reduce labor-intensive prompt engineering, [43] propose AUTOPROMPT, which searches prompts with a gradient-based method to select label words and templates. More recent works, including P-tuning [36] and Prefix-tuning [29], leverage continuous templates, which are more effective than discrete prompt search.

Recently, some studies have tried to integrate external knowledge into prompt design. [20] propose an approach called PTR, which leverages logic rules to construct prompts with sub-prompts for many-class text classification. [22] propose an approach to incorporate an external knowledge graph into the verbalizer with calibration. [6] propose a knowledge-aware prompt-tuning approach that injects knowledge into prompt template design and answer construction. Different from those approaches, we regard prompts as a bridge between text and knowledge and focus on addressing the issues of knowledge missing, knowledge noise, and knowledge heterogeneity.

# 3 METHODOLOGY

# 3.1 Preliminaries

Our approach OntoPrompt is a general framework that can be applied to a wide range of applications, as shown in Figure 2. We evaluate our approach on three popular tasks, namely, relation extraction (RE), event extraction (EE), and knowledge graph completion (KGC). To inject ontology knowledge, we introduce ontology transformation to enrich and convert structured knowledge into raw texts (ontology text). We regard those raw texts as auxiliary prompts and append them to the input sequences and prompt templates. Note that those ontology texts, including textual descriptions, can provide semantic information about the ontology of the mentioned entities/spans.

![](images/7386a679bbe12e0d8b57afd0167df51adb8b1b389670d2b56dd1bbe354579bfe.jpg)
(a) Relation Extraction

![](images/dd6639b307b11cc39fd2539a6dc28b2375a0926878ad50cf1b83087de4781305.jpg)
(b) Event Extraction

![](images/d82d9239cdbc83f1dba74c5eeb127d9568759d0b595258f610ccb0848e899f81.jpg)
(c) Knowledge Graph Completion
Figure 2: Illustration of OntoPrompt for relation extraction (RE), event extraction (EE) and knowledge graph completion (KGC). The texts in blue and green squares are knowledgeable texts converted from the ontology.

We integrate that knowledge during fine-tuning with span-sensitive knowledge injection to avoid external noise. We further introduce collective training to optimize prompts as well as language models jointly.

To facilitate understanding, we first introduce the general framework with prompt-tuning (§ 3.2) and ontology transformation (§ 3.3), then introduce span-sensitive knowledge injection (§ 3.4), and finally collective training (§ 3.5).

# 3.2 General Framework with Prompt-Tuning

Let $X_{\mathrm{in}} = \{x_1, x_2, \dots, x_L\}$ be a sentence, where $x_i$ is the $i^{th}$ token in the input sentence and $L$ is the number of tokens. Specifically, $X_{\mathrm{in}}$ is converted to a fixed token sequence $\tilde{X}_{\mathrm{in}}$ and then mapped to a sequence of hidden vectors $\{\mathbf{h}_k \in \mathbb{R}^d\}$. Given the input sequence $\tilde{X}_{\mathrm{in}} = [\mathrm{CLS}]X_{\mathrm{in}}[\mathrm{SEP}]$, vanilla fine-tuning approaches leverage a generic head layer over the [CLS] embedding (e.g., an MLP layer) to predict an output class. In this paper, we follow previous prompt-tuning studies [17] and use a task-specific pattern string (template $\mathcal{T}$) to coax the model into producing a textual output corresponding to a given class (label token $\mathcal{M}(Y)$). Specifically, $X_{\mathrm{prompt}}$, which contains one [MASK] token, is directly fed to the MLM as:

$$
X_{\mathrm{prompt}} = [\mathrm{CLS}]\, X_{\mathrm{in}}\, [\mathrm{SEP}]\, \mathcal{T}\, [\mathrm{SEP}] \tag{1}
$$

When the prompt is fed into the MLM, the model obtains the probability distribution $p([\mathrm{MASK}] \mid X_{\mathrm{prompt}})$ over the candidate classes $y \in Y$ as:

$$
p\left(y \mid X_{\mathrm{prompt}}\right) = \sum_{w \in \mathcal{V}_{y}} p\left([\mathrm{MASK}] = w \mid X_{\mathrm{prompt}}\right) \tag{2}
$$

where $\mathcal{V}_{y}$ denotes the set of label tokens mapped to class $y$ and $w$ is one such label token.
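
To make Eqs. (1) and (2) concrete, the following is a minimal sketch of this scoring procedure with a masked language model from the HuggingFace transformers library; the template and the verbalizer (the label-token sets $\mathcal{V}_y$) are toy placeholders rather than the paper's exact choices.

```python
# Minimal sketch of Eqs. (1)-(2): score each class y by summing the MLM
# probabilities of its label tokens V_y at the [MASK] position.
import torch
from transformers import BertForMaskedLM, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
model = BertForMaskedLM.from_pretrained("bert-large-uncased").eval()

def class_probabilities(x_in, template, verbalizer):
    """verbalizer maps each class y to its list of label tokens V_y."""
    # X_prompt = [CLS] X_in [SEP] T [SEP]; the tokenizer adds [CLS]/[SEP].
    inputs = tokenizer(f"{x_in} [SEP] {template}", return_tensors="pt")
    mask_pos = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero()[0]
    with torch.no_grad():
        logits = model(**inputs).logits[0, mask_pos.item()]
    token_probs = logits.softmax(dim=-1)
    # p(y | X_prompt) = sum over w in V_y of p([MASK] = w | X_prompt).
    return {y: sum(token_probs[tokenizer.convert_tokens_to_ids(w)].item()
                   for w in words)
            for y, words in verbalizer.items()}

probs = class_probabilities(
    "Turing entered King's College, Cambridge in 1931.",
    "Turing [MASK] King's College, Cambridge.",
    {"schools_attended": ["entered"], "employee_of": ["joined"]},
)
print(max(probs, key=probs.get))  # highest-probability relation label
```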
In this paper, we regard ontology text (obtained from an external knowledge graph) as auxiliary prompts to inject knowledge and append them to the input sequence templates, as shown in Figure 2. We utilize ontology transformation to construct auxiliary prompts, which can inject rich task- and instance-related knowledge. To a certain degree, those auxiliary prompts are similar to the demonstrations in [17]; however, auxiliary prompts are not instances from the training set but come from external knowledge. More details are introduced in the following sections.

# 3.3 Ontology Transformation

In this paper, the ontology representation is denoted as $\mathcal{O} = \{\mathcal{C},\mathcal{E},\mathcal{D}\}$, where $\mathcal{C}$ is a set of concepts, $\mathcal{E}$ is the set of edges connecting the concepts, and $\mathcal{D}$ is the textual description of each concept (the ontological schema contains a paragraph of textual description, which carries lexically meaningful information about a concept and can also be represented by triples using properties, e.g., rdfs:comment). The concept set here refers to domain-specific concepts.

![](images/75fd94e51859e6d389e9f5670e34b72d8b4224b2711cdf500feffb97beaf8e74.jpg)
Figure 3: Illustration of span-sensitive knowledge injection. In this example, Ontology #1 (red) describes the ontology of the mentioned token starting at position 1, Meta-relation (green) describes the meta-relation path between a related ontology pair, and Template (cyan) refers to the template for the prompt. (Best viewed in color.)

For example, we leverage the type ontology related to the mentioned entities in relation extraction and event extraction. We utilize the domain (i.e., head entity types) and range (i.e., tail entity types) constraints in knowledge graph completion.

Given the differences among downstream tasks, we leverage a different source of ontology for each task in ontology transformation. We first extract the ontology of each instance from external knowledge graphs and then transform those ontologies (rdfs:comment) into raw texts as auxiliary prompts.

3.3.1 Applying to Relation Extraction. We utilize MUC (Message Understanding Conference) [49], which defines the concept of named entities. Note that named entities can provide important type information, which is beneficial for relation extraction. We utilize those definitions as textual descriptions in the ontology schema. Specifically, we use $[CLS] < \text{InputText} > [SEP] < \text{Template} > [SEP] < \text{OntologyText} > [SEP]$ as the final input sequence. We construct placeholders for entities in the $<\text{OntologyText}>$ and replace those placeholders with external textual descriptions. Moreover, we leverage the path between entity pairs from the ontology as meta-relation text to enhance the $<\text{OntologyText}>$, as shown in Figure 2.

We further add learnable tokens as virtual tokens on both sides of the [MASK] so that the model can automatically learn the most suitable words for the prompt. Specifically, we use several unused tokens [u1]-[u4] (e.g., unused or special tokens in the vocabulary) as virtual tokens, as illustrated in the sketch below.

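To illustrate, here is a small sketch of assembling such an input sequence for relation extraction; the entity-type descriptions and the meta-relation string are abridged, hypothetical stand-ins for the MUC ontology texts, and the helper name is ours.

```python
# Sketch of the [CLS] <InputText> [SEP] <Template> [SEP] <OntologyText> [SEP]
# layout for RE, with virtual tokens [u1]-[u4] around the [MASK] slot.
ONTOLOGY_TEXT = {  # rdfs:comment-style descriptions (hypothetical, abridged)
    "PERSON": "Person: named person or family.",
    "ORGANIZATION": "Organization: named corporate or governmental entity.",
}

def build_re_input(text, head, head_type, tail, tail_type, meta_relation):
    # Template: [MASK] predicts the relation label, flanked by learnable
    # virtual tokens mapped to unused vocabulary entries.
    template = f"{head} [u1] [u2] [MASK] [u3] [u4] {tail}"
    # Placeholders in <OntologyText> are filled with the external textual
    # descriptions plus the meta-relation path between the entity types.
    ontology_text = (f"{ONTOLOGY_TEXT[head_type]} {ONTOLOGY_TEXT[tail_type]} "
                     f"{meta_relation}")
    return f"[CLS] {text} [SEP] {template} [SEP] {ontology_text} [SEP]"

print(build_re_input(
    "Turing entered King's College, Cambridge in 1931.",
    "Turing", "PERSON", "King's College", "ORGANIZATION",
    "person - member_of - organization",  # illustrative meta-relation path
))
```
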
3.3.2 Applying to Event Extraction. We follow the work of [25] to build the ontology for event extraction. We first leverage the ACE event schema as our base event ontology. The ACE 2005 corpus includes rich event annotations for 33 event types. However, in real-world scenarios, there may be thousands of event types. In order to facilitate inference in scenarios with limited training samples, we integrate the 33 ACE event types and argument roles with 1,161 frames from FrameNet and construct a bigger event ontology for event extraction. We manually map the selected frames to the event ontology. Similarly, we use $[CLS] < \text{InputText} > [SEP] < \text{Template} > [SEP] < \text{OntologyText} > [SEP]$ as the default format for the input sequence. We construct placeholders for triggers and types in the $<\text{OntologyText}>$.

Similar to relation extraction, we also leverage the learnable virtual token to enhance the prompt representation.

3.3.3 Applying to Knowledge Graph Completion. We use the corresponding items obtained from external Wikidata queries as the source of ontology and extract the textual descriptions. Following KG-BERT [57], we regard knowledge graph completion as a triple classification task and concatenate entities and relations as an input sequence. Similar to the above-mentioned tasks, we use $[CLS] < \text{InputText} > [SEP] < \text{Template} > [SEP] < \text{OntologyText} > [SEP]$ as the default input sequence.

We also use the learnable virtual token to enhance the prompt representation. At inference time, we rank the output scores according to the probability of the sentences predicted by the language model and count the hits of the gold-standard entities to evaluate the performance of the approach.
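
As a sketch of this evaluation protocol, assuming the per-candidate scores have already been computed by the language model, the mean rank (MR) and Hits@10 reported later can be derived as follows; the function names and toy scores are illustrative.

```python
# Sketch of KGC ranking metrics: rank candidates per query by score,
# then aggregate the gold entity's ranks into MR and Hits@k.
def rank_of_gold(scores, gold):
    """scores: candidate entity -> LM-based score; returns 1-based rank."""
    ordered = sorted(scores, key=scores.get, reverse=True)
    return ordered.index(gold) + 1

def mr_and_hits_at_k(gold_ranks, k=10):
    mr = sum(gold_ranks) / len(gold_ranks)                     # mean rank (lower is better)
    hits = sum(r <= k for r in gold_ranks) / len(gold_ranks)   # Hits@k (higher is better)
    return mr, hits

# Toy example: two test triples with three candidate tail entities each.
ranks = [rank_of_gold({"paris": 0.9, "lyon": 0.05, "nice": 0.05}, "paris"),
         rank_of_gold({"paris": 0.2, "lyon": 0.7, "nice": 0.1}, "paris")]
print(mr_and_hits_at_k(ranks))  # (1.5, 1.0)
```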
# 3.4 Span-sensitive Knowledge Injection

As observed by [34], excessive knowledge injection may introduce noise into the original text and cause a performance decay. In order to solve this problem, we propose span-sensitive knowledge injection, as shown in Figure 3. Given input text $X_{\mathrm{in}} = [x_1, x_2, \dots, x_L]$ with $L$ tokens, we use a visible matrix to limit the impact of the knowledge injection on the input text. In the language model architecture, the attention mask matrix is added to the self-attention weights before the softmax layer. Therefore, we modify the attention mask matrix $M$ as follows:

$$
M_{ij} = \begin{cases} 0 & x_i, x_j \in X_{\mathrm{in}} \\ 0 & x_i, x_j \in x^{o} \\ 0 & x_i \in X_{\mathrm{in}},\; x_j \in x^{o} \text{ and } i = p_k \\ -\infty & \text{otherwise} \end{cases} \tag{3}
$$

Table 1: Relation Extraction Dataset statistics.

<table><tr><td>Dataset</td><td>#Relations</td><td>#Triples</td><td>#No-relation</td></tr><tr><td>SemEval-2010 Task 8</td><td>19</td><td>10,717</td><td>17.4%</td></tr><tr><td>TACRED-Revisit</td><td>42</td><td>106,264</td><td>79.5%</td></tr><tr><td>Wiki80</td><td>80</td><td>56,000</td><td>-</td></tr><tr><td>DialogRE</td><td>36</td><td>1,788</td><td>-</td></tr></table>

Table 2: Knowledge Graph Completion Dataset statistics.

<table><tr><td>Dataset</td><td>#Relations</td><td>#Entities</td><td>#Train</td><td>#Dev</td><td>#Test</td></tr><tr><td>FB15k-237</td><td>237</td><td>14,541</td><td>1,896</td><td>17,535</td><td>2,000</td></tr><tr><td>WN18RR</td><td>18</td><td>40,943</td><td>88</td><td>3,034</td><td>3,134</td></tr><tr><td>UMLS</td><td>46</td><td>135</td><td>329</td><td>652</td><td>661</td></tr></table>

where $x_i$ and $x_j$ are tokens from the input text $X_{\mathrm{in}}$ or the ontology text $x^{o}$. A value of $-\infty$ in the attention mask matrix $M$ blocks token $i$ from attending to token $j$, and 0 allows token $i$ to attend to token $j$. Thus, $x_i$ can attend to $x_j$ if both tokens belong to $X_{\mathrm{in}}$, or both tokens belong to the text description of the same ontology $x^{o}$, or $x_i$ is the token at the span position of entity $e_i$ in $X_{\mathrm{in}}$ and $x_j$ is from its ontology description text $x^{o}$. Here $p_k$ indicates the position of the mentioned span (e.g., entities in relation extraction and knowledge graph completion; triggers or arguments in event extraction) in the input text.
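
A literal translation of Eq. (3) could look like the sketch below, assuming each ontology description is appended after the input tokens as its own segment; the segment layout and function name are our own illustration.

```python
# Sketch of the visible matrix M of Eq. (3), to be added to the attention
# scores before softmax: 0 = visible, -inf = blocked.
import torch

def visible_matrix(n_input, segments):
    """segments: (start, end, p_k) per ontology description, where [start, end)
    are token offsets in the concatenated sequence and p_k is the position of
    the corresponding mentioned span in the input text."""
    n = n_input + sum(end - start for start, end, _ in segments)
    m = torch.full((n, n), float("-inf"))
    m[:n_input, :n_input] = 0.0            # case 1: both tokens in X_in
    for start, end, p_k in segments:
        m[start:end, start:end] = 0.0      # case 2: same ontology text x^o
        m[p_k, start:end] = 0.0            # case 3: only the span token at p_k
                                           #         attends into its ontology
    return m

# 5 input tokens; a 3-token ontology description attached to the span at position 1.
print(visible_matrix(5, [(5, 8, 1)]))
```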
# 3.5 Collective Training

Note that those injected tokens from ontology transformation should be associated with the input sequence. Thus, we introduce collective training to jointly optimize the ontology tokens and the input sequence. First, we initialize the ontology tokens with real word embeddings and optimize those ontology tokens with the language model fixed. Then, we optimize all the parameters of the model, including the language model and the ontology tokens. Note that our method does not rely on a specific model architecture; thus, it can be plugged into any kind of pre-trained language model, such as BERT or BART.
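
The two-stage optimization could be sketched as follows, assuming a HuggingFace-style model whose prompt/ontology token embeddings are identifiable by parameter name, and a user-supplied training step; the learning rates and step counts are illustrative.

```python
# Sketch of collective training: stage 1 tunes only the prompt/ontology token
# embeddings with the LM frozen; stage 2 unfreezes everything and trains jointly.
import torch

def collective_train(model, prompt_param_names, train_step, loader,
                     stage1_steps=100, stage2_steps=1000):
    params = dict(model.named_parameters())
    # Stage 1: freeze the language model; the ontology tokens are assumed to
    # have been initialized from real word embeddings upstream.
    for name, p in params.items():
        p.requires_grad = name in prompt_param_names
    opt = torch.optim.AdamW([p for p in params.values() if p.requires_grad], lr=1e-3)
    for _, batch in zip(range(stage1_steps), loader):
        train_step(model, batch, opt)  # forward, loss, backward, opt.step()
    # Stage 2: optimize all parameters, including the language model.
    for p in params.values():
        p.requires_grad = True
    opt = torch.optim.AdamW(model.parameters(), lr=1e-5)
    for _, batch in zip(range(stage2_steps), loader):
        train_step(model, batch, opt)
```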
# 4 EXPERIMENTS

In this section, we present extensive experiments with OntoPrompt on three tasks: relation extraction, event extraction, and knowledge graph completion. The experimental results show that OntoPrompt can perform better than baselines in both the standard supervised and few-shot settings.

# 4.1 Datasets

For relation extraction, we choose a variety of datasets to evaluate OntoPrompt, including sentence-level extraction datasets such as TACRED-Revisit [1], SemEval-2010 Task 8, and Wiki80, as well as the dialogue-level extraction dataset DialogRE [61]. The detailed statistics of each relation extraction dataset are shown in Table 1.

For event extraction, we evaluate our OntoPrompt model on the ACE 2005 dataset [14], which defines 33 different event types and 35 semantic roles. We use the same data split and pre-processing steps as [50], in which 529/30/40 newswire documents are used for the training/dev/test sets.

Table 3: Results on relation extraction datasets for OntoPrompt (F1 score). We report the results of fine-tuning BERT-Large with entity markers. We use $K = 8, 16, 32$ (# examples per class). Full means the full training set is used.

<table><tr><td>Dataset</td><td>Model</td><td>K=8</td><td>K=16</td><td>K=32</td><td>Full</td></tr><tr><td rowspan="3">SemEval</td><td>Fine-tuning</td><td>24.8</td><td>43.5</td><td>63.9</td><td>87.4</td></tr><tr><td>GDPNET</td><td>25.3</td><td>43.5</td><td>65.2</td><td>88.0</td></tr><tr><td>OntoPrompt</td><td>52.6 (+27.3)</td><td>65.0 (+21.5)</td><td>83.0 (+17.8)</td><td>89.1 (+1.1)</td></tr><tr><td rowspan="3">TACRED-Revisit</td><td>Fine-tuning</td><td>7.4</td><td>15.3</td><td>25.4</td><td>74.8</td></tr><tr><td>GDPNET</td><td>7.9</td><td>17.8</td><td>26.1</td><td>77.2</td></tr><tr><td>OntoPrompt</td><td>28.8 (+20.9)</td><td>33.1 (+15.3)</td><td>34.8 (+8.7)</td><td>78.2 (+1.0)</td></tr><tr><td rowspan="3">WiKi80</td><td>Fine-tuning</td><td>46.1</td><td>60.5</td><td>70.1</td><td>85.1</td></tr><tr><td>GDPNET</td><td>47.4</td><td>62.3</td><td>70.5</td><td>87.0</td></tr><tr><td>OntoPrompt</td><td>68.7 (+21.3)</td><td>75.6 (+13.3)</td><td>79.1 (+8.6)</td><td>87.9 (+0.9)</td></tr><tr><td rowspan="3">DialogRE</td><td>Fine-tuning</td><td>29.3</td><td>41.1</td><td>49.5</td><td>58.3</td></tr><tr><td>GDPNET</td><td>20.1</td><td>42.5</td><td>49.7</td><td>65.2</td></tr><tr><td>OntoPrompt</td><td>41.5 (+12.2)</td><td>47.3 (+4.8)</td><td>52.4 (+2.7)</td><td>66.1 (+0.9)</td></tr></table>

For knowledge graph completion, we use several standard knowledge graph completion datasets, including UMLS, which has various categories; WN18RR, based on WordNet; and FB15K-237, based on Freebase. FB15K is a subset of the large-scale knowledge graph Freebase; in FB15K-237, triples with reversible relations are removed, since such inverse triples are trivial to predict. Compared with FB15K, WN18RR has more entities and fewer types of relations. Since UMLS is a vertical-domain dataset, it has fewer entities than the previous two knowledge graphs. Detailed statistics are shown in Table 2.

# 4.2 Settings

The proposed model is implemented in PyTorch. Our experiments measure the average performance with a fixed set of seeds, $S_{seed}$, across five different sampled $D_{train}$ for each task. We utilize a grid search over multiple hyperparameters and select the best result as measured on $D_{dev}$ for each set $\{D_{train}^s, D_{dev}\}, s \in S_{seed}$. We employ AdamW as the optimizer and use a BERT-Large model for all experiments. We uniformly use special entity markers to highlight the entity mentions for relation extraction. For few-shot learning, we follow the settings of [17], which differ from the N-way K-shot setting. We construct prompt templates following [16].

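This protocol amounts to the sketch below, where train_and_eval stands in for the actual training pipeline and returns dev/test F1 scores; the seed set and the hyperparameter grid are placeholders.

```python
# Sketch of the evaluation protocol: per seed, grid-search hyperparameters,
# select by dev F1, and report the test F1 averaged over the sampled D_train.
import itertools
import statistics

def evaluate(train_and_eval, seeds=(1, 2, 3, 4, 5),
             grid={"lr": (1e-5, 3e-5), "batch_size": (8, 16)}):
    test_scores = []
    for seed in seeds:
        runs = (train_and_eval(seed=seed, **dict(zip(grid, values)))
                for values in itertools.product(*grid.values()))
        best = max(runs, key=lambda run: run["dev_f1"])  # model selection on D_dev
        test_scores.append(best["test_f1"])
    return statistics.mean(test_scores)
```
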
# 4.3 Results

For relation extraction, we compare OntoPrompt with the vanilla fine-tuning approach and the best-performing baseline model GDPNET [55] on four datasets. As shown in Table 3, OntoPrompt under the few-shot setting achieves better performance than the vanilla fine-tuning method on all relation extraction datasets. In the fully supervised setting, OntoPrompt obtains an average increase of about $1.0\%$ compared with GDPNET. Note that the implementation of OntoPrompt is relatively simple; we believe that our model can be plugged into different models to promote the performance of relation extraction in more complex scenarios.

Table 4: Knowledge graph completion result (8-shot) on WN18RR, FB15K-237 and UMLS. The best score is in bold.

<table><tr><td rowspan="2">Model</td><td colspan="2">WN18RR</td><td colspan="2">FB15K-237 (mini)</td><td colspan="2">UMLS</td></tr><tr><td>MR ↓</td><td>Hit@10 ↑</td><td>MR ↓</td><td>Hit@10 ↑</td><td>MR ↓</td><td>Hit@10 ↑</td></tr><tr><td>TransE [3]</td><td>19313.0</td><td>0.0004</td><td>5847.4</td><td>0.0925</td><td>38.5</td><td>0.3271</td></tr><tr><td>TransR [30]</td><td>20861.0</td><td>0.0023</td><td>5970.5</td><td>0.0903</td><td>43.2</td><td>0.3079</td></tr><tr><td>TransD [26]</td><td>20202.5</td><td>0.0006</td><td>5923.1</td><td>0.0901</td><td>43.7</td><td>0.3030</td></tr><tr><td>TransH [52]</td><td>19272.9</td><td>0.0015</td><td>6102.3</td><td>0.0802</td><td>41.4</td><td>0.3166</td></tr><tr><td>DistMult [56]</td><td>20671.6</td><td>0.0003</td><td>5791.7</td><td>0.0059</td><td>59.3</td><td>0.1716</td></tr><tr><td>ComplEx [48]</td><td>20318.6</td><td>0.0000</td><td>6451.8</td><td>0.0046</td><td>61.3</td><td>0.2260</td></tr><tr><td>RotatE [45]</td><td>20162.1</td><td>0.0003</td><td>7365.8</td><td>0.0066</td><td>68.4</td><td>0.0526</td></tr><tr><td>KG-BERT [57]</td><td>2937.2</td><td>0.1175</td><td>2023.4</td><td>0.0451</td><td>34.6</td><td>0.3382</td></tr><tr><td>RAN [66]</td><td>3150.2</td><td>0.0769</td><td>3320.5</td><td>0.0072</td><td>34.2</td><td>0.3226</td></tr><tr><td>GRL [77]</td><td>2913.3</td><td>0.0900</td><td>2913.5</td><td>0.0300</td><td>34.5</td><td>0.3312</td></tr><tr><td>OntoPrompt</td><td>1442.6 (-1470.7)</td><td>0.1344 (+0.0169)</td><td>714.5 (-1308.9)</td><td>0.111 (+0.0185)</td><td>27.2 (-7.0)</td><td>0.3448 (+0.0066)</td></tr></table>

Table 5: F1 score (%) on few-shot learning. The reported F1 score refers to the result of event argument classification (event extraction) on ACE 2005.

<table><tr><td>Model</td><td>1%</td><td>5%</td><td>10%</td><td>20%</td><td>Full</td></tr><tr><td>dbRNN [41]</td><td>-</td><td>8.1</td><td>17.2</td><td>24.1</td><td>58.7</td></tr><tr><td>JMEE [35]</td><td>-</td><td>8.9</td><td>20.3</td><td>28.4</td><td>60.3</td></tr><tr><td>DYGIE++ [50]</td><td>-</td><td>5.3</td><td>15.7</td><td>23.4</td><td>51.4</td></tr><tr><td>MQAEE [51]</td><td>5.2</td><td>27.3</td><td>32.1</td><td>38.1</td><td>53.4</td></tr><tr><td>TEXT2EVENT [38]</td><td>3.4</td><td>19.8</td><td>25.3</td><td>36.9</td><td>49.8</td></tr><tr><td>OntoPrompt</td><td>25.6 (+20.4)</td><td>40.1 (+12.8)</td><td>47.8 (+15.7)</td><td>50.1 (+12.0)</td><td>55.3 (-5.0)</td></tr></table>

![](images/39a888c0a1965c695e9659ab864c3838649a8bf3d4b1bbedf7611c5c6bf0be2e.jpg)
Figure 4: Ablation study results of OntoPrompt.

For event extraction, Table 5 reports the F1 scores in extremely low-resource scenarios (training with less than $20\%$ of the data, a setting similar to [31]). Notably, OntoPrompt yields clear advantages in few-shot event extraction. To be specific, OntoPrompt obtains $25.6\%$ F1 with $1\%$ of the data, in comparison to $5.2\%$ for MQAEE and $3.4\%$ for TEXT2EVENT. Although the performance of OntoPrompt on the full sample is slightly weaker than that of JMEE, which relies on external data augmentation, we believe that OntoPrompt can effectively identify triggers and arguments with less data dependence. Besides, we notice that OntoPrompt has a faster convergence speed (see details in the appendix). Compared with MQAEE and TEXT2EVENT, which need a certain amount of data to fine-tune the language model, OntoPrompt only leverages $20\%$ of the training data to achieve similarly satisfactory performance.

For knowledge graph completion, Table 4 reports the performance of multiple knowledge graph completion approaches in the few-shot setting (8 samples per relation, i.e., 8-shot). The experimental results show that OntoPrompt achieves the best performance, demonstrating the superiority of our proposed model. Although OntoPrompt yields only a slight improvement in the few-shot setting on UMLS, it obtains an increase of $6.6\%$ in Hits@10 on FB15K-237 compared to KG-BERT. We think the improvement on FB15K-237 is mainly due to OntoPrompt's ability to fully utilize the implicit fact knowledge obtained from the external ontology.

Although we evaluate OntoPrompt on the above three tasks, our proposed approach can also be applied to other tasks, such as text classification and question answering, given suitable ontologies.

# 4.4 Ablation Study

To further prove the effects of different modules in OntoPrompt, we conduct an ablation study and report the experimental results in Figure 4. *w/o span-sensitive KI* indicates the model without the visible matrix, so all tokens can see each other. *w/o span-sensitive KI & collective training* refers to the model without the visible matrix and without the two-stage optimization, i.e., all parameters are optimized directly. We observe that removing any component causes a performance decay, which demonstrates the effectiveness of our approach.

# 4.5 Case Study

To further analyze the collective training of virtual tokens, we conduct a nearest-neighbor word-embedding search for the virtual tokens in the sentence to project the best-optimized virtual tokens into readable natural language. From Table 6, we observe the following findings:

1) OntoPrompt can customize a unique virtual token for each ontology in different event subtypes, while the virtual tokens of the model without ontology knowledge are random tokens scattered over the entire dataset. This indicates that injecting external knowledge can benefit the prompt template representation.

Table 6: Case study results on the ACE2005 dataset. We map the virtual token into the readable natural language (red) in the input examples, demonstrating the intuitiveness of OntoPrompt. It is worth noting that OntoPrompt can customize a unique virtual token for each prompt based on ontology with different event subtypes. In contrast, the virtual tokens of models without knowledge injection are randomly scattered over the entire dataset.

<table><tr><td>Model</td><td>Input Example</td><td>Event Type</td><td>Model F1</td></tr><tr><td>BERT</td><td>[CLS] Everyone, everywhere should have the basic right to elect their government. [SEP]</td><td>Personnel.Elect</td><td>49.1</td></tr><tr><td>BERT + ONTOLOGY</td><td>[CLS] Everyone, everywhere should have the basic right to elect their government. [SEP] Event trigger: Event trigger (trigger) is a word that can trigger the occurrence of an event ... [SEP]</td><td>Personnel.Elect</td><td>50.2</td></tr><tr><td>ONTOPROMPT (-ONTO)</td><td>[CLS] Everyone, everywhere should have the basic right to elect their government. [SEP] Event trigger is &lt;Trigger&gt;, event type is &lt;Type&gt;, argument are [prohibits] [lunged] [MASK] [relics] [lunged] ... [SEP] Person: named person or family ... [SEP]</td><td>Personnel.Elect</td><td>52.3</td></tr><tr><td>ONTOPROMPT (-VIR)</td><td>[CLS] Everyone, everywhere should have the basic right to elect their government. [SEP] Event trigger is [MASK], event type is [MASK], argument are [MASK] ... [SEP] Person: The Person is conceived of as independent of other specific individuals with whom they have relationships and ... [SEP]</td><td>Personnel.Elect</td><td>52.9</td></tr><tr><td>ONTOPROMPT (-BOTH)</td><td>[CLS] Everyone, everywhere should have the basic right to elect their government. [SEP] Event trigger is [MASK], event type is [MASK], argument are [MASK] ... [SEP]</td><td>Personnel.Elect</td><td>47.5</td></tr><tr><td rowspan="3">ONTOPROMPT</td><td>[CLS] Everyone, everywhere should have the basic right to elect their government. [SEP] Event trigger is [MASK], event type is [MASK], argument are [discusses] [receptionist] [MASK] [moaned] [teachings] ... [SEP] Person: the person elected. The Person is conceived of ... [SEP]</td><td>Personnel.Elect</td><td rowspan="3">55.3</td></tr><tr><td>[CLS] Here, prisoners were tortured. [SEP] Event trigger is [MASK], event type is [MASK], argument are [focuses] [ethiopia] [MASK] [brownish] [explores] ... [SEP] Place: where the injuring takes place. Place designates relatively stable bounded areas of the world which have permanent relative ... [SEP]</td><td>Life.Injure</td></tr><tr><td>[CLS] But it has nothing to do whether we should go to war with Iraq or not. [SEP] Event trigger is [MASK], event type is [MASK], argument are [angrily] [iraqi] [MASK] [hibits] [reddish] ... [SEP] Attacker: The attacking/instigating agent. An assailant physically attacks a Victim ... [SEP]</td><td>Conflict.Attack</td></tr></table>

2) The integration of ontology knowledge improves the base pre-trained model to varying degrees, e.g., BERT $(+1.1\%)$ and OntoPrompt $(+3.0\%)$, which further proves that ontology knowledge is helpful for downstream tasks such as event extraction.

3) The removal of the virtual template tokens reduces OntoPrompt's performance $(-2.4\%)$. We believe that the virtual tokens can learn the implicit information of task-specific knowledge and adjust the prompt appropriately according to the context. Meanwhile, removing both the virtual template tokens and the ontology knowledge module results in a more severe performance decay $(-7.8\%)$, which demonstrates that the virtual template tokens and the ontology knowledge both have a positive impact on model performance.

# 4.6 Discussion

Notably, our OntoPrompt can be viewed as an approach that leverages prompts as a bridge to inject knowledge. Previous studies such as RAG [27] share the same intuition of retrieving and concatenating relevant texts as knowledge: such knowledge can be directly revised and expanded, and the accessed knowledge can be inspected and interpreted. Beyond those approaches, we argue that prompts transformed from an ontology can carry denser knowledge. Moreover, we utilize span-sensitive knowledge injection to filter noisy information. However, our model cannot handle complex or structured knowledge, such as OWL reasoning rules and description logics, nor represent them as raw texts. We leave this for future work.

# 5 CONCLUSION AND FUTURE WORK

We focus on ontology knowledge injection and propose an ontology-enhanced prompt-tuning (OntoPrompt) approach, which can be applied to relation extraction, event extraction, and knowledge graph completion tasks. Experimental results illustrate that OntoPrompt can obtain better results than baselines on eight datasets. The method in this paper verifies the effectiveness of ontology knowledge as prompt guidance.

In the future, we plan to apply our approach to more applications, such as text generation and question answering. We will also try to combine the proposed method with semi-supervised learning algorithms to make better use of large amounts of unlabeled data. In addition, we will try to inject more complex knowledge, such as symbolic rules in the knowledge graph, into the proposed model to construct more powerful prompt templates.

# ACKNOWLEDGMENTS

We want to express our gratitude to the anonymous reviewers for their hard work and kind comments. This work is funded by the National Key R&D Program of China (Funding No. SQ2018YFC000004), NSFC 91846204/U19B2027, the Zhejiang Provincial Natural Science Foundation of China (No. LGG22F030011), the Ningbo Natural Science Foundation (2021J190), and the Yongjiang Talent Introduction Programme.

# REFERENCES

[1] Christoph Alt, Aleksandra Gabryszak, and Leonhard Hennig. 2020. TACRED Revisited: A Thorough Evaluation of the TACRED Relation Extraction Task. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel R. Tetreault (Eds.). Association for Computational Linguistics, 1558-1569. https://doi.org/10.18653/v1/2020.acl-main.142
[2] Ning Bian, Xianpei Han, Bo Chen, and Le Sun. 2021. Benchmarking Knowledge-Enhanced Commonsense Question Answering via Knowledge-to-Text Transformation. In Thirty-Fifth AAAI Conference on Artificial Intelligence, AAAI 2021, Thirty-Third Conference on Innovative Applications of Artificial Intelligence, IAAI 2021, The Eleventh Symposium on Educational Advances in Artificial Intelligence, EAAI 2021, Virtual Event, February 2-9, 2021. AAAI Press, 12574-12582. https://ojs.aaai.org/index.php/AAAI/article/view/17490
[3] Antoine Bordes, Nicolas Usunier, Alberto Garcia-Duran, Jason Weston, and Oksana Yakhnenko. 2013. Translating Embeddings for Modeling Multi-relational Data. In Advances in Neural Information Processing Systems 26: 27th Annual Conference on Neural Information Processing Systems 2013. Proceedings of a meeting held December 5-8, 2013, Lake Tahoe, Nevada, United States, Christopher J. C. Burges, Leon Bottou, Zoubin Ghahramani, and Kilian Q. Weinberger (Eds.). 2787-2795. https://proceedings.neurips.cc/paper/2013/hash/1cecc7a77928ca8133fa24680a88d2f9-Abstract.html
[4] Jiaoyan Chen, Yuxia Geng, Zhuo Chen, Jeff Z. Pan, Yuan He, Wen Zhang, Ian Horrocks, and Huajun Chen. 2021. Low-resource Learning with Knowledge Graphs: A Comprehensive Survey. CoRR abs/2112.10006 (2021). arXiv:2112.10006 https://arxiv.org/abs/2112.10006
[5] Xiang Chen, Ningyu Zhang, Lei Li, Xin Xie, Shumin Deng, Chuanqi Tan, Fei Huang, Luo Si, and Huajun Chen. 2021. LightNER: A Lightweight Generative Framework with Prompt-guided Attention for Low-resource NER. arXiv preprint arXiv:2109.00720 (2021).
[6] Xiang Chen, Ningyu Zhang, Xin Xie, Shumin Deng, Yunzhi Yao, Chuanqi Tan, Fei Huang, Luo Si, and Huajun Chen. 2021. KnowPrompt: Knowledge-aware Prompt-tuning with Synergistic Optimization for Relation Extraction. CoRR abs/2104.07650 (2021). arXiv:2104.07650 https://arxiv.org/abs/2104.07650
[7] Leyang Cui, Yu Wu, Jian Liu, Sen Yang, and Yue Zhang. 2021. Template-Based Named Entity Recognition Using BART. In Findings of the Association for Computational Linguistics: ACL/IJCNLP 2021, Online Event, August 1-6, 2021 (Findings of ACL, Vol. ACL/IJCNLP 2021), Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (Eds.). Association for Computational Linguistics, 1835-1845. https://doi.org/10.18653/v1/2021.findings-acl.161
[8] Shumin Deng, Ningyu Zhang, Hui Chen, Chuanqi Tan, Fei Huang, Changliang Xu, and Huajun Chen. 2021. Low-resource extraction with knowledge-aware pairwise prototype learning. Knowledge-Based Systems (2021), 107584. https://doi.org/10.1016/j.knosys.2021.107584
[9] Shumin Deng, Ningyu Zhang, Jiaojian Kang, Yichi Zhang, Wei Zhang, and Huajun Chen. 2020. Meta-Learning with Dynamic-Memory-Based Prototypical Network for Few-Shot Event Detection. In WSDM '20: The Thirteenth ACM International Conference on Web Search and Data Mining, Houston, TX, USA, February 3-7, 2020, James Caverlee, Xia (Ben) Hu, Mounia Lalmas, and Wei Wang (Eds.). ACM, 151-159. https://doi.org/10.1145/3336191.3371796
[10] Shumin Deng, Ningyu Zhang, Luoqiu Li, Hui Chen, Huaixiao Tou, Mosha Chen, Fei Huang, and Huajun Chen. 2021. OntoED: Low-resource Event Detection with Ontology Embedding. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, ACL/IJCNLP 2021, (Volume 1: Long Papers), Virtual Event, August 1-6, 2021, Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (Eds.). Association for Computational Linguistics, 2828-2839. https://doi.org/10.18653/v1/2021.acl-long.220
[11] Shumin Deng, Ningyu Zhang, Zhanlin Sun, Jiaoyan Chen, and Huajun Chen. 2020. When Low Resource NLP Meets Unsupervised Language Model: Meta-Pretraining then Meta-Learning for Few-Shot Text Classification (Student Abstract). In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020. AAAI Press, 13773-13774. https://ojs.aaai.org/index.php/AAAI/article/view/7158
[12] Shumin Deng, Ningyu Zhang, Wen Zhang, Jiaoyan Chen, Jeff Z. Pan, and Huajun Chen. 2019. Knowledge-Driven Stock Trend Prediction and Explanation via Temporal Convolutional Network. In Companion of The 2019 World Wide Web Conference, WWW 2019, San Francisco, CA, USA, May 13-17, 2019, Sihem Amer-Yahia, Mohammad Mahdian, Ashish Goel, Geert-Jan Houben, Kristina Lerman, Julian J. McAuley, Ricardo Baeza-Yates, and Leila Zia (Eds.). ACM, 678-685. https://doi.org/10.1145/3308560.3317701
[13] Ning Ding, Yulin Chen, Xu Han, Guangwei Xu, Pengjun Xie, Hai-Tao Zheng, Zhiyuan Liu, Juanzi Li, and Hong-Gee Kim. 2021. Prompt-Learning for Fine-Grained Entity Typing. CoRR abs/2108.10604 (2021). arXiv:2108.10604 https://arxiv.org/abs/2108.10604

[14] George R. Doddington, Alexis Mitchell, Mark A. Przybocki, Lance A. Ramshaw, Stephanie M. Strassel, and Ralph M. Weischedel. 2004. The Automatic Content Extraction (ACE) Program - Tasks, Data, and Evaluation. In Proceedings of the Fourth International Conference on Language Resources and Evaluation, LREC 2004, May 26-28, 2004, Lisbon, Portugal. European Language Resources Association. http://www.lrec-conf.org/proceedings/lrec2004/summaries/5.htm
[15] Bowen Dong, Yuan Yao, Ruobing Xie, Tianyu Gao, Xu Han, Zhiyuan Liu, Fen Lin, Leyu Lin, and Maosong Sun. 2020. Meta-Information Guided Meta-Learning for Few-Shot Relation Classification. In Proceedings of the 28th International Conference on Computational Linguistics, COLING 2020, Barcelona, Spain (Online), December 8-13, 2020, Donia Scott, Núria Bel, and Chengqing Zong (Eds.). International Committee on Computational Linguistics, 1594-1605. https://doi.org/10.18653/v1/2020.coling-main.140
[16] Tianyu Gao, Adam Fisch, and Danqi Chen. 2020. Making Pre-trained Language Models Better Few-shot Learners. CoRR abs/2012.15723 (2020). arXiv:2012.15723 https://arxiv.org/abs/2012.15723
[17] Tianyu Gao, Adam Fisch, and Danqi Chen. 2021. Making Pre-trained Language Models Better Few-shot Learners. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, ACL/IJCNLP 2021, (Volume 1: Long Papers), Virtual Event, August 1-6, 2021, Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (Eds.). Association for Computational Linguistics, 3816-3830. https://doi.org/10.18653/v1/2021.acl-long.295
[18] Yuxia Geng, Jiaoyan Chen, Zhuo Chen, Jeff Z. Pan, Zhiquan Ye, Zonggang Yuan, Yantao Jia, and Huajun Chen. 2021. OntoZSL: Ontology-enhanced Zero-shot Learning. In WWW '21: The Web Conference 2021, Virtual Event / Ljubljana, Slovenia, April 19-23, 2021, Jure Leskovec, Marko Grobelnik, Marc Najork, Jie Tang, and Leila Zia (Eds.). ACM / IW3C2, 3325-3336. https://doi.org/10.1145/3442381.3450042
[19] Yuxian Gu, Xu Han, Zhiyuan Liu, and Minlie Huang. 2021. PPT: Pretrained Prompt Tuning for Few-shot Learning. CoRR abs/2109.04332 (2021). arXiv:2109.04332 https://arxiv.org/abs/2109.04332
[20] Xu Han, Weilin Zhao, Ning Ding, Zhiyuan Liu, and Maosong Sun. 2021. PTR: Prompt Tuning with Rules for Text Classification. CoRR abs/2105.11259 (2021). arXiv:2105.11259 https://arxiv.org/abs/2105.11259
[21] I-Hung Hsu, Kuan-Hao Huang, Elizabeth Boschee, Scott Miller, Prem Natarajan, Kai-Wei Chang, Nanyun Peng, et al. 2021. Event extraction as natural language generation. arXiv preprint arXiv:2108.12724 (2021).
[22] Shengding Hu, Ning Ding, Huadong Wang, Zhiyuan Liu, Juanzi Li, and Maosong Sun. 2021. Knowledgeable Prompt-tuning: Incorporating Knowledge into Prompt Verbalizer for Text Classification. CoRR abs/2108.02035 (2021). arXiv:2108.02035 https://arxiv.org/abs/2108.02035
[23] Yang Hu, Adriane Chapman, Guihua Wen, and Wendy Hall. 2021. What Can Knowledge Bring to Machine Learning? - A Survey of Low-shot Learning for Structured Data. CoRR abs/2106.06410 (2021). arXiv:2106.06410 https://arxiv.org/abs/2106.06410
[24] Lifu Huang, Heng Ji, Kyunghyun Cho, Ido Dagan, Sebastian Riedel, and Clare R. Voss. 2018. Zero-Shot Transfer Learning for Event Extraction. In ACL (1). Association for Computational Linguistics, 2160-2170. https://www.aclweb.org/anthology/P18-1201/
[25] Lifu Huang, Heng Ji, Kyunghyun Cho, Ido Dagan, Sebastian Riedel, and Clare R. Voss. 2018. Zero-Shot Transfer Learning for Event Extraction. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, ACL 2018, Melbourne, Australia, July 15-20, 2018, Volume 1: Long Papers, Iryna Gurevych and Yusuke Miyao (Eds.). Association for Computational Linguistics, 2160-2170. https://doi.org/10.18653/v1/P18-1201
[26] Guoliang Ji, Shizhu He, Liheng Xu, Kang Liu, and Jun Zhao. 2015. Knowledge Graph Embedding via Dynamic Mapping Matrix. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing of the Asian Federation of Natural Language Processing, ACL 2015, July 26-31, 2015, Beijing, China, Volume 1: Long Papers. The Association for Computer Linguistics, 687-696. https://doi.org/10.3115/v1/p15-1067
[27] Patrick S. H. Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, and Douwe Kiela. 2020. Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (Eds.). https://proceedings.neurips.cc/paper/2020/hash/6b493230205f780e1bc26945df7481e5-Abstract.html
[28] Luoqiu Li, Xiang Chen, Hongbin Ye, Zhen Bi, Shumin Deng, Ningyu Zhang, and Huajun Chen. 2021. On Robustness and Bias Analysis of BERT-Based Relation Extraction. In Knowledge Graph and Semantic Computing: Knowledge Graph Empowers New Infrastructure Construction, Bing Qin, Zhi Jin, Haofen Wang, Jeff Pan, Yongbin Liu, and Bo An (Eds.). Springer Singapore.

[29] Xiang Lisa Li and Percy Liang. 2021. Prefix-Tuning: Optimizing Continuous Prompts for Generation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, ACL/IJCNLP 2021, (Volume 1: Long Papers), Virtual Event, August 1-6, 2021, Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (Eds.). Association for Computational Linguistics, 4582-4597. https://doi.org/10.18653/v1/2021.acl-long.353
[30] Yankai Lin, Zhiyuan Liu, Maosong Sun, Yang Liu, and Xuan Zhu. 2015. Learning Entity and Relation Embeddings for Knowledge Graph Completion. In Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, January 25-30, 2015, Austin, Texas, USA, Blai Bonet and Sven Koenig (Eds.). AAAI Press, 2181-2187. http://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/view/9571
[31] Jian Liu, Yubo Chen, Kang Liu, Wei Bi, and Xiaojiang Liu. 2020. Event Extraction as Machine Reading Comprehension. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, EMNLP 2020, Online, November 16-20, 2020, Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu (Eds.). Association for Computational Linguistics, 1641-1651. https://doi.org/10.18653/v1/2020.emnlp-main.128
[32] Kun Liu, Yao Fu, Chuanqi Tan, Mosha Chen, Ningyu Zhang, Songfang Huang, and Sheng Gao. 2021. Noisy-Labeled NER with Confidence Estimation. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2021, Online, June 6-11, 2021, Kristina Toutanova, Anna Rumshisky, Luke Zettlemoyer, Dilek Hakkani-Tur, Iz Beltagy, Steven Bethard, Ryan Cotterell, Tanmoy Chakraborty, and Yichao Zhou (Eds.). Association for Computational Linguistics, 3437-3445. https://doi.org/10.18653/v1/2021.naacl-main.269
[33] Shulin Liu, Yubo Chen, Shizhu He, Kang Liu, and Jun Zhao. 2016. Leveraging FrameNet to Improve Automatic Event Detection. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, ACL 2016, August 7-12, 2016, Berlin, Germany, Volume 1: Long Papers. The Association for Computer Linguistics. https://doi.org/10.18653/v1/p16-1201
[34] Weijie Liu, Peng Zhou, Zhe Zhao, Zhiruo Wang, Qi Ju, Haotang Deng, and Ping Wang. 2020. K-BERT: Enabling Language Representation with Knowledge Graph. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020. AAAI Press, 2901-2908. https://ojs.aaai.org/index.php/AAAI/article/view/5681
[35] Xiao Liu, Zhunchen Luo, and Heyan Huang. 2018. Jointly Multiple Events Extraction via Attention-based Graph Information Aggregation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, Brussels, Belgium, October 31 - November 4, 2018, Ellen Riloff, David Chiang, Julia Hockenmaier, and Jun'ichi Tsujii (Eds.). Association for Computational Linguistics, 1247-1256. https://doi.org/10.18653/v1/d18-1156
[36] Xiao Liu, Yanan Zheng, Zhengxiao Du, Ming Ding, Yujie Qian, Zhilin Yang, and Jie Tang. 2021. GPT Understands, Too. CoRR abs/2103.10385 (2021). arXiv:2103.10385 https://arxiv.org/abs/2103.10385
[37] Dongfang Lou, Zhilin Liao, Shumin Deng, Ningyu Zhang, and Huajun Chen. 2021. MLBiNet: A Cross-Sentence Collective Event Detection Network. arXiv preprint arXiv:2105.09458 (2021).
[38] Yaojie Lu, Hongyu Lin, Jin Xu, Xianpei Han, Jialong Tang, Annan Li, Le Sun, Meng Liao, and Shaoyi Chen. 2021. Text2Event: Controllable Sequence-to-Structure Generation for End-to-end Event Extraction. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, ACL/IJCNLP 2021, (Volume 1: Long Papers), Virtual Event, August 1-6, 2021, Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (Eds.). Association for Computational Linguistics, 2795-2806. https://doi.org/10.18653/v1/2021.acl-long.217
[39] Jason Phang, Thibault Févry, and Samuel R. Bowman. 2018. Sentence Encoders on STILTs: Supplementary Training on Intermediate Labeled-data Tasks. CoRR abs/1811.01088 (2018). arXiv:1811.01088 http://arxiv.org/abs/1811.01088
[40] Timo Schick and Hinrich Schütze. 2021. Exploiting Cloze-Questions for Few-Shot Text Classification and Natural Language Inference. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, EACL 2021, Online, April 19 - 23, 2021, Paola Merlo, Jörg Tiedemann, and Reut Tsarfaty (Eds.). Association for Computational Linguistics, 255-269. https://www.aclweb.org/anthology/2021.eacl-main.20/
[41] Lei Sha, Feng Qian, Baobao Chang, and Zhifang Sui. 2018. Jointly Extracting Event Triggers and Arguments by Dependency-Bridge RNN and Tensor-Based Argument Interaction. In Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18), New Orleans, Louisiana, USA, February 2-7, 2018, Sheila A. McIlraith and Kilian Q. Weinberger (Eds.). AAAI Press, 5916-5923. https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/view/16222
[42] Jiawei Sheng, Shu Guo, Zhenyu Chen, Juwei Yue, Lihong Wang, Tingwen Liu, and Hongbo Xu. 2020. Adaptive Attentional Network for Few-Shot Knowledge Graph Completion. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, EMNLP 2020, Online, November 16-20, 2020, Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu (Eds.). Association for Computational Linguistics, 1681-1691. https://doi.org/10.18653/v1/2020.emnlp-main.131
+ [43] Taylor Shin, Yasaman Razeghi, Robert L. Logan IV, Eric Wallace, and Sameer Singh. 2020. AutoPrompt: Eliciting Knowledge from Language Models with Automatically Generated Prompts. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, EMNLP 2020, Online, November 16-20, 2020, Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu (Eds.). Association for Computational Linguistics, 4222-4235. https://doi.org/10.18653/v1/2020.emnlp-main.346
351
+ [44] YuSheng Su, Xu Han, Zhengyan Zhang, Peng Li, Zhiyuan Liu, Yankai Lin, Jie Zhou, and Maosong Sun. 2020. Contextual Knowledge Selection and Embedding towards Enhanced Pre-Trained Language Models. CoRR abs/2009.13964 (2020). arXiv:2009.13964 https://arxiv.org/abs/2009.13964
352
+ [45] Zhiqing Sun, Zhi-Hong Deng, Jian-Yun Nie, and Jian Tang. 2019. RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net. https://openreview.net/forum?id=HkgEQnRqYQ
353
+ [46] Derek Tam, Rakesh R. Menon, Mohit Bansal, Shashank Srivastava, and Colin Raffel. 2021. Improving and Simplifying Pattern Exploiting Training. CoRR abs/2103.11955 (2021). arXiv:2103.11955 https://arxiv.org/abs/2103.11955
354
+ [47] Meihan Tong, Bin Xu, Shuai Wang, Yixin Cao, Lei Hou, Juanzi Li, and Jun Xie. 2020. Improving Event Detection via Open-domain Trigger Knowledge. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel R. Tetreault (Eds.). Association for Computational Linguistics, 5887-5897. https://doi.org/10.18653/v1/2020.acl-main.522
355
+ [48] Théo Trouillon, Johannes Welbl, Sebastian Riedel, Éric Gaussier, and Guillaume Bouchard. 2016. Complex Embeddings for Simple Link Prediction. In Proceedings of the 33nd International Conference on Machine Learning, ICML 2016, New York City, NY, USA, June 19-24, 2016 (JMLR Workshop and Conference Proceedings, Vol. 48), Maria-Florina Balcan and Kilian Q. Weinberger (Eds.). JMLR.org, 2071-2080. http://proceedings.mlr.press/v48/trouillon16.html
356
+ [49] Marc Vilain, John D Burger, John Aberdeen, Dennis Connolly, and Lynette Hirschman. 1995. A model-theoretic coreference scoring scheme. In Sixth Message Understanding Conference (MUC-6): Proceedings of a Conference Held in Columbia, Maryland, November 6-8, 1995.
357
+ [50] David Wadden, Ulme Wennberg, Yi Luan, and Hannaneh Hajishirzi. 2019. Entity, Relation, and Event Extraction with Contextualized Span Representations. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing, EMNLP-IJCNLP 2019, Hong Kong, China, November 3-7, 2019, Kentaro Inui, Jing Jiang, Vincent Ng, and Xiaojun Wan (Eds.). Association for Computational Linguistics, 5783-5788. https://doi.org/10.18653/v1/D19-1585
358
+ [51] Xing David Wang, Leon Weber, and Ulf Leser. 2020. Biomedical Event Extraction as Multi-turn Question Answering. In Proceedings of the 11th International Workshop on Health Text Mining and Information Analysis, LOUHI@EMNLP 2020, Online, November 20, 2020, Eben Holderness, Antonio Jimeno-Yepes, Alberto Lavelli, Anne-Lyse Minard, James Pustejovsky, and Fabio Rinaldi (Eds.). Association for Computational Linguistics, 88-96. https://doi.org/10.18653/v1/2020.louhi-1.10
359
+ [52] Zhen Wang, Jianwen Zhang, Jianlin Feng, and Zheng Chen. 2014. Knowledge Graph Embedding by Translating on Hyperplanes. In Proceedings of the Twenty-Eighth AAAI Conference on Artificial Intelligence, July 27-31, 2014, Quebec City, Quebec, Canada, Carla E. Brodley and Peter Stone (Eds.). AAAI Press, 1112-1119. http://www.aaai.org/ocs/index.php/AAAI/AAAI14/paper/view/8531
360
+ [53] Yuejia Xiang, Ziheng Zhang, Jiaoyan Chen, Xi Chen, Zhenxi Lin, and Yefeng Zheng. 2021. OntoEA: Ontology-guided Entity Alignment via Joint Knowledge Graph Embedding. In Findings of the Association for Computational Linguistics: ACL/IJCNLP 2021, Online Event, August 1-6, 2021 (Findings of ACL, Vol. ACL/IJCNLP 2021), Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli (Eds.). Association for Computational Linguistics, 1117-1128. https://doi.org/10.18653/v1/2021.findings-acl.96
361
+ [54] Qizhe Xie, Zihang Dai, Eduard H. Hovy, Thang Luong, and Quoc Le. 2020. Unsupervised Data Augmentation for Consistency Training. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin (Eds.). https://proceedings.neurips.cc/paper/2020/hash/44feb0096faa8326192570788b38c1d1-Abstract.html
362
+ [55] Fuzhao Xue, Aixin Sun, Hao Zhang, and Eng Siong Chng. 2021. GDPNet: Refining Latent Multi-View Graph for Relation Extraction. In Thirty-Fifth AAAI Conference on Artificial Intelligence, AAAI 2021, Thirty-Third Conference on Innovative Applications of Artificial Intelligence, IAAI 2021, The Eleventh Symposium on Educational Advances in Artificial Intelligence, EAAI 2021, Virtual Event, February 2-9, 2021. AAAI Press, 14194-14202. https://ojs.aaai.org/index.php/AAAI/article/view/17670
363
+
364
+ [56] Bishan Yang, Wen-tau Yih, Xiaodong He, Jianfeng Gao, and Li Deng. 2015. Embedding Entities and Relations for Learning and Inference in Knowledge Bases. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, Yoshua Bengio and Yann LeCun (Eds.). http://arxiv.org/abs/1412.6575
365
+ [57] Liang Yao, Chengsheng Mao, and Yuan Luo. 2019. KG-BERT: BERT for Knowledge Graph Completion. CoRR abs/1909.03193 (2019). arXiv:1909.03193 http://arxiv.org/abs/1909.03193
366
+ [58] Hongbin Ye, Ningyu Zhang, Zhen Bi, Shumin Deng, Chuanqi Tan, Hui Chen, Fei Huang, and Huajun Chen. 2021. Learning to Ask for Data-Efficient Event Argument Extraction. CoRR abs/2110.00479 (2021). arXiv:2110.00479 https://arxiv.org/abs/2110.00479
367
+ [59] Hongbin Ye, Ningyu Zhang, Zhen Bi, Shumin Deng, Chuanqi Tan, Hui Chen, Fei Huang, and Huajun Chen. 2021. Learning to Ask for Data-Efficient Event Argument Extraction. arXiv preprint arXiv:2110.00479 (2021).
368
+ [60] Wenpeng Yin, Nazneen Fatema Rajani, Dragomir R. Radev, Richard Socher, and Caiming Xiong. 2020. Universal Natural Language Processing with Limited Annotations: Try Few-shot Textual Entailment as a Start. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, EMNLP 2020, Online, November 16-20, 2020, Bonnie Webber, Trevor Cohn, Yulan He, and Yang Liu (Eds.). Association for Computational Linguistics, 8229-8239. https://doi.org/10.18653/v1/2020.emnlp-main.660
369
+ [61] Dian Yu, Kai Sun, Claire Cardie, and Dong Yu. 2020. Dialogue-Based Relation Extraction. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel R. Tetreault (Eds.). Association for Computational Linguistics, 4927-4940. https://doi.org/10.18653/v1/2020.acl-main.444
370
+ [62] Haiyang Yu, Ningyu Zhang, Shumin Deng, Hongbin Ye, Wei Zhang, and Huajun Chen. 2020. Bridging Text and Knowledge with Multi-Prototype Embedding for Few-Shot Relational Triple Extraction. In Proceedings of the 28th International Conference on Computational Linguistics, COLING 2020, Barcelona, Spain (Online), December 8-13, 2020, Donia Scott, Núria Bel, and Chengqing Zong (Eds.). International Committee on Computational Linguistics, 6399-6410. https://doi.org/10.18653/v1/2020.coling-main.563
371
+ [63] Chuxu Zhang, Huaxiu Yao, Chao Huang, Meng Jiang, Zhenhui Li, and Nitesh V. Chawla. 2020. Few-Shot Knowledge Graph Completion. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020. AAAI Press, 3041-3048. https://aaai.org/ojs/index.php/AAAI/article/view/5698
372
+ [64] Ningyu Zhang, Xiang Chen, Xin Xie, Shumin Deng, Chuanqi Tan, Mosha Chen, Fei Huang, Luo Si, and Huajun Chen. 2021. Document-level Relation Extraction as Semantic Segmentation. arXiv preprint arXiv:2106.03618 (2021).
373
+ [65] Ningyu Zhang, Shumin Deng, Xu Cheng, Xi Chen, Yichi Zhang, Wei Zhang, and Huajun Chen. 2021. Drop Redundant, Shrink Irrelevant: Selective Knowledge Injection for Language Pretraining. In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI 2021, Virtual Event / Montreal, Canada, 19-27 August 2021, Zhi-Hua Zhou (Ed.). ijcai.org, 4007-4014. https://doi.org/10.24963/ijcai.2021/552
374
+ [66] Ningyu Zhang, Shumin Deng, Zhanlin Sun, Jiaoyan Chen, Wei Zhang, and Huajun Chen. 2020. Relation Adversarial Network for Low Resource Knowledge Graph Completion. In WWW '20: The Web Conference 2020, Taipei, Taiwan, April 20-24, 2020, Yennun Huang, Irwin King, Tie-Yan Liu, and Maarten van Steen (Eds.). ACM / IW3C2, 1-12. https://doi.org/10.1145/3366423.3380089
375
+ [67] Ningyu Zhang, Shumin Deng, Zhanlin Sun, Xi Chen, Wei Zhang, and Huajun Chen. 2018. Attention-Based Capsule Network with Dynamic Routing for Relation Extraction. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, Brussels, Belgium, October 31 - November 4, 2018, Ellen Riloff, David Chiang, Julia Hockenmaier, and Jun'ichi Tsujii (Eds.). Association for Computational Linguistics, 986-992. https://doi.org/10.18653/v1/d18-1120
376
+ [68] Ningyu Zhang, Shumin Deng, Zhanlin Sun, Guanying Wang, Xi Chen, Wei Zhang, and Huajun Chen. 2019. Long-tail Relation Extraction via Knowledge Graph Embeddings and Graph Convolution Networks. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), Jill Burstein, Christy Doran, and Thamar Solorio (Eds.). Association for Computational Linguistics, 3016-3025. https://doi.org/10.18653/v1/n19-1306
377
+ [69] Ningyu Zhang, Shumin Deng, Hongbin Ye, Wei Zhang, and Huajun Chen. 2022. Robust triple extraction with cascade bidirectional capsule network. Expert Systems with Applications 187 (2022), 115806. https://doi.org/10.1016/j.eswa.2021.115806
378
+ [70] Ningyu Zhang, Qianghuai Jia, Shumin Deng, Xiang Chen, Hongbin Ye, Hui Chen, Huaxiao Tou, Gang Huang, Zhao Wang, Nengwei Hua, and Huajun Chen. 2021. AliCG: Fine-grained and Evolvable Conceptual Graph Construction for Semantic Search at Alibaba. In KDD.
379
+
380
+ [71] Ningyu Zhang, Luoqiu Li, Xiang Chen, Shumin Deng, Zhen Bi, Chuanqi Tan, Fei Huang, and Huajun Chen. 2021. Differentiable Prompt Makes Pre-trained Language Models Better Few-shot Learners. arXiv preprint arXiv:2108.13161 (2021).
381
+ [72] Ningyu Zhang, Xin Xie, Xiang Chen, Shumin Deng, Chuanqi Tan, Fei Huang, Xu Cheng, and Huajun Chen. 2022. Reasoning Through Memorization: Nearest Neighbor Knowledge Graph Embeddings. CoRR abs/2201.05575 (2022). arXiv:2201.05575 https://arxiv.org/abs/2201.05575
382
+ [73] Ningyu Zhang, Xin Xu, Liankuan Tao, Haiyang Yu, Hongbin Ye, Xin Xie, Xiang Chen, Zhoubo Li, Lei Li, Xiaozhuan Liang, Yunzhi Yao, Shumin Deng, Zhenru Zhang, Chuanqi Tan, Fei Huang, Guozhou Zheng, and Huajun Chen. 2022. DeepKE: A Deep Learning Based Knowledge Extraction Toolkit for Knowledge Base Population. CoRR abs/2201.03335 (2022). arXiv:2201.03335 https://arxiv.org/abs/2201.03335
383
+ [74] Ningyu Zhang, Hongbin Ye, Shumin Deng, Chuanqi Tan, Mosha Chen, Songfang Huang, Fei Huang, and Huajun Chen. 2021. Contrastive Information Extraction With Generative Transformer. IEEE/ACM Transactions on Audio, Speech, and Language Processing 29 (2021), 3077-3088. https://doi.org/10.1109/TASLP.2021.3110126
384
+ [75] Ningyu Zhang, Hongbin Ye, Jiacheng Yang, Shumin Deng, Chuanqi Tan, Mosha Chen, Songfang Huang, Fei Huang, and Huajun Chen. 2021. LOGEN: Few-shot Logical Knowledge-Conditioned Text Generation with Self-training. CoRR abs/2112.01404 (2021). arXiv:2112.01404 https://arxiv.org/abs/2112.01404
385
+ [76] Wen Zhang, Bibek Paudel, Liang Wang, Jiaoyan Chen, Hai Zhu, Wei Zhang, Abraham Bernstein, and Huajun Chen. 2019. Iteratively Learning Embeddings and Rules for Knowledge Graph Reasoning. In The World Wide Web Conference, WWW 2019, San Francisco, CA, USA, May 13-17, 2019, Ling Liu, Ryen W. White, Amin Mantrach, Fabrizio Silvestri, Julian J. McAuley, Ricardo Baeza-Yates, and Leila Zia (Eds.). ACM, 2366-2377. https://doi.org/10.1145/3308558.3313612
386
+ [77] Yao Zhang, Xu Zhang, Jun Wang, Hongru Liang, Wenqiang Lei, Zhe Sun, Adam Jatowt, and Zhenglu Yang. 2021. Generalized Relation Learning with Semantic Correlation Awareness for Link Prediction. In Thirty-Fifth AAAI Conference on Artificial Intelligence, AAAI 2021, Thirty-Third Conference on Innovative Applications of Artificial Intelligence, IAAI 2021, The Eleventh Symposium on Educational Advances in Artificial Intelligence, EAAI 2021, Virtual Event, February 2-9, 2021. AAAI Press, 4679-4687. https://ojs.aaai.org/index.php/AAAI/article/view/16598
387
+ [78] Zhengyan Zhang, Xu Han, Zhiyuan Liu, Xin Jiang, Maosong Sun, and Qun Liu. 2019. ERNIE: Enhanced Language Representation with Informative Entities. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28 - August 2, 2019, Volume 1: Long Papers, Anna Korhonen, David R. Traum, and Lluís Màrquez (Eds.). Association for Computational Linguistics, 1441-1451. https://doi.org/10.18653/v1/p19-1139
388
+ [79] Zhengyan Zhang, Xu Han, Zhiyuan Liu, Xin Jiang, Maosong Sun, and Qun Liu. 2019. ERNIE: Enhanced Language Representation with Informative Entities. In ACL. 1441-1451.
389
+
390
+ ![](images/74b76860f6c617e23abbb96036828d940197287c7593dd643563b47839842d68.jpg)
391
+ # A CONVERGENCE ANALYSIS
392
+ Figure 5: Convergence Analysis Results of OntoPrompt.
2201.11xxx/2201.11332/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:463c94ff635956098852a1e48bf99acf1929a577d7ea9a3961f2c84e6da18c3b
3
+ size 751947
2201.11xxx/2201.11332/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11349/d1d35eb5-e8bc-4854-99d4-0edf55cb9c66_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11349/d1d35eb5-e8bc-4854-99d4-0edf55cb9c66_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11349/d1d35eb5-e8bc-4854-99d4-0edf55cb9c66_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c6fe5aeeb8183b20e1a51426692afc42aff13b6ae4860b5349b4753eb8567c1
3
+ size 4512069
2201.11xxx/2201.11349/full.md ADDED
@@ -0,0 +1,600 @@
1
+ # Confidence May Cheat: Self-Training on Graph Neural Networks under Distribution Shift
2
+
3
+ Hongrui Liu* liuhongrui@bupt.edu.cn Beijing University of Posts and Telecommunications
4
+
5
+ Binbin Hu bin.hbb@antfin.com Ant Group
6
+
7
+ Xiao Wang
8
+ xiaowang@bupt.edu.cn
9
+ Beijing University of Posts and Telecommunications
10
+ Peng Cheng Laboratory
11
+
12
+ Chuan Shi†
13
+ shichuan@bupt.edu.cn
14
+ Beijing University of Posts and Telecommunications
15
+ Peng Cheng Laboratory
16
+
17
+ Zhiqiang Zhang lingyao.zzq@antfin.com Ant Group
18
+
19
+ Jun Zhou
20
+ jun.zhoujun@antfin.com
21
+ Ant Group
22
+
23
+ # ABSTRACT
24
+
25
+ Graph Convolutional Networks (GCNs) have recently attracted vast interest and achieved state-of-the-art performance on graphs, but their success typically hinges on careful training with large amounts of expensive and time-consuming labeled data. To alleviate labeled data scarcity, self-training methods have been widely adopted on graphs by labeling high-confidence unlabeled nodes and then adding them to the training step. In this line, we empirically make a thorough study of current self-training methods on graphs. Surprisingly, we find that high-confidence unlabeled nodes are not always useful, and can even introduce a distribution shift between the original labeled dataset and the dataset augmented by self-training, severely hindering the capability of self-training on graphs. To this end, in this paper, we propose a novel Distribution Recovered Graph Self-Training framework (DR-GST), which could recover the distribution of the original labeled dataset. Specifically, we first prove the equality of the loss function in the self-training framework under the distribution shift case and the population distribution if each pseudo-labeled node is weighted by a proper coefficient. Considering the intractability of the coefficient, we then propose to replace the coefficient with the information gain after observing the same changing trend between them, where the information gain is estimated via dropout variational inference and dropedge variational inference in DR-GST, respectively. However, such a weighted loss function will enlarge the impact of incorrect pseudo labels. As a result, we apply a loss correction method to improve the quality of pseudo labels. Both our theoretical analysis and extensive experiments on five benchmark datasets demonstrate the effectiveness of the proposed DR-GST, as well as each well-designed component in DR-GST.
26
+
27
+ *Work done during internship at Ant Group. † Corresponding author
28
+
29
+ WWW '22, April 25-29, 2022, Virtual Event, Lyon, France
30
+ © 2022 Association for Computing Machinery.
31
+ This is the author's version of the work. It is posted here for your personal use. Not for redistribution. The definitive Version of Record was published in Proceedings of the ACM Web Conference 2022 (WWW '22), April 25-29, 2022, Virtual Event, Lyon, France, https://doi.org/10.1145/3485447.3512172.
32
+
33
+ # CCS CONCEPTS
34
+
35
+ • Computing methodologies $\rightarrow$ Neural networks; • Theory of computation $\rightarrow$ Social networks; Semi-supervised learning.
36
+
37
+ # KEYWORDS
38
+
39
+ Graph Neural Networks, Self-Training, Information Gain
40
+
41
+ # ACM Reference Format:
42
+
43
+ Hongrui Liu, Binbin Hu, Xiao Wang, Chuan Shi, Zhiqiang Zhang, and Jun Zhou. 2022. Confidence May Cheat: Self-Training on Graph Neural Networks under Distribution Shift. In Proceedings of the ACM Web Conference 2022 (WWW '22), April 25–29, 2022, Virtual Event, Lyon, France. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/3485447.3512172
44
+
45
+ # 1 INTRODUCTION
46
+
47
+ Graphs are ubiquitous across many real-world applications, ranging from citation and social network analysis to protein interface and chemical bond prediction. With the surge of demands, Graph Convolution Network (GCN) and its variants [17, 18, 30, 32, 35] (abbreviated as GCNs) have recently attracted vast interest and achieved state-of-the-art performance in various tasks on graphs, most notably semi-supervised node classification. Nevertheless, its success could typically hinge on careful training with large amounts of labeled data, which is expensive and time-consuming to be obtained [28]. Empirically, the performance of GCNs will rapidly decline with the decrease of labeled data [38].
48
+
49
+ As one of the promising approaches, self-training [16, 19] aims at addressing labeled data scarcity by making full use of abundant unlabeled data in addition to task-specific labeled data. Given an arbitrary model trained on the original labeled data as the teacher model, the key idea of self-training is to pseudo-label high-confidence unlabeled samples to augment the above labeled data, and a student model is trained with the augmented data to replace the teacher model. Such iterative learning is repeated until convergence<sup>1</sup>. Analogously, self-training has great potential to facilitate GCNs in exploiting unlabeled data [20, 28, 38]. However, these studies only focus on high-confidence nodes on account of the prefabricated assumption that the higher the confidence, the more accurate the prediction. Naturally, we are curious about a fundamental question: "Are all the unlabeled nodes pseudo-labeled with high confidence truly useful?"
52
+
53
+ As a motivating example, we conduct an analysis experiment on a benchmark dataset Cora [26] to explore how much additional information these high-confidence nodes can bring to the model (denoted as information gain). More details can be seen in Section 3. Surprisingly, our experimental results show a clear negative correlation between the confidence and the information gain, implying that nodes pseudo-labeled by existing graph self-training methods with high confidence may be low-information-gain and useless. To further understand the underlying reason, we illustrate the distribution of unlabeled nodes and find these high-confidence (or low-information-gain) nodes are far from the decision boundary, which implies that they potentially guide the model to perform worthless optimization for a more crisp decision boundary. Existing graph self-training methods which focus on high-confidence nodes are "cheated" by confidence in this way.
54
+
55
+ In light of the above observations, we further investigate what will happen when self-training is cheated by confidence. We discover that during the optimization procedure dominated by easy nodes (i.e., nodes with low information gain), a Distribution Shift phenomenon between the original and augmented dataset gradually appears. This is because more and more easy nodes selected by high confidence are added to the original labeled dataset, so the distribution gradually shifts toward the augmented dataset and, as a result, excessive attention is paid to such easy nodes. Not surprisingly, this issue will severely threaten the capacity of self-training on graphs, since the distribution of the augmented dataset differs from the population distribution, resulting in terrible generalization during evaluation. Alleviating the distribution shift arising from self-training on graphs is in urgent demand, yet unexplored in existing studies.
56
+
57
+ In this paper, we propose an information gain weighted self-training framework, DR-GST, which could recover the distribution of the original labeled dataset. Specifically, we first prove that the loss function of the self-training framework under the distribution shift case is equal to that under the population distribution if we weight each pseudo-labeled node with a proper coefficient. But the coefficient is generally intractable in practice. We then observe the same changing trend between the coefficient and the information gain, and propose to replace the coefficient with the information gain, which can be estimated via dropout and dropedge variational inference. Consequently, we can recover the shifted distribution with the newly proposed information gain weighted loss function. Such a loss function forces the model to pay more attention to hard nodes, i.e., nodes with high information gain, but will enlarge the impact of incorrect pseudo labels. Therefore, we apply loss correction [10, 23, 27] to self-training to correct the prediction of the student model, so that the impact of incorrect pseudo labels from the teacher model can be alleviated. Finally, we conduct a theoretical analysis of self-training on graphs, and the conclusion shows that both distribution shift and incorrect pseudo labels will severely hinder its capability, which is consistent with our designs.
58
+
59
+ In summary, the main contributions are highlighted as follows:
60
+
61
+ - We make a thorough study on graph self-training, and find two phenomena: 1) pseudo-labeled high-confidence nodes may cheat; 2) a distribution shift arises between the original labeled dataset and the augmented dataset. Both severely hinder the capability of self-training on graphs.
62
+ - We propose a novel graph self-training framework, DR-GST, which not only addresses the distribution shift issue from the view of information gain, but is also equipped with a loss correction strategy for improving the quality of pseudo labels.
63
+ - We theoretically analyze the rationality of the whole DR-GST framework, and extensive experimental results on five benchmark datasets demonstrate that DR-GST consistently and significantly outperforms various state-of-the-art baselines.
64
+
65
+ # 2 PRELIMINARY
66
+
67
+ Let $\mathcal{G} = (\mathcal{V},\mathcal{E},\mathrm{X})$ be a graph with the adjacency matrix $\mathrm{A}\in \mathbb{R}^{|\mathcal{V}|\times |\mathcal{V}|}$, where $\mathcal{V}$ and $\mathcal{E}$ are respectively the sets of nodes and edges, and $\mathrm{X} = [\mathrm{x}_1,\mathrm{x}_2,\dots ,\mathrm{x}_{|\mathcal{V}|}]\in \mathbb{R}^{|\mathcal{V}|\times D_o}$ is the $D_{o}$-dimensional feature matrix for nodes. In the common semi-supervised node classification setting, we only have access to a small set of labeled nodes $\mathcal{V}_L$ with their labels $\mathcal{Y}_L$, along with a much larger set of unlabeled nodes $\mathcal{V}_U$, where $|\mathcal{V}_L|\ll |\mathcal{V}_U|$.
68
+
69
+ Self-training Generally, self-training methods on graphs first train a vanilla GCN as the base teacher model $f_{\theta}(\mathrm{X}, \mathrm{A})$ with ground-truth labels $\mathcal{Y}_L$, where $\theta$ is the model parameter set. We could obtain the probability vector for each node $v_i \in \mathcal{V}$ as: $p(y_i | x_i, A; \theta) = f_{\theta}(x_i, A)$. For convenience, we abbreviate it to $p_i$ and denote the $j$-th element of $p_i$ by $p_{i,j}$. Next, the teacher model pseudo-labels a subset $S_U \subset \mathcal{V}_U$ of unlabeled nodes with its prediction $\bar{y}_u = \arg \max_j p_{u,j}$ for each node $v_u \in S_U$. The selection of $S_U$ is based on the confidence score $r_i = \max_j p_{i,j}$, i.e., only nodes with $r_i$ higher than a threshold or the top-$k$ high-confidence nodes are added to the labeled dataset. Then the augmented dataset $\mathcal{V}_L \cup S_U$ is used to train a student model $f_{\bar{\theta}}$ with the following objective function.
70
+
71
+ $$
72
+ \begin{array}{l} \min_{\bar{\theta}\in\Theta} \mathcal{L}(\mathrm{A},\mathrm{X},\mathcal{Y}_{L}) = \min_{\bar{\theta}\in\Theta} \mathbb{E}_{v_{i}\in\mathcal{V}_{L},\, y_{i}\in\mathcal{Y}_{L}}\, l(y_{i},\mathrm{p}_{i}) \\ \qquad + \lambda\, \mathbb{E}_{v_{u}\in\mathcal{S}_{U},\, \mathcal{S}_{U}\subset\mathcal{V}_{U}}\, \mathbb{E}_{\bar{y}_{u}\sim p(y_{u}\mid \mathrm{x}_{u},\mathrm{A};\theta)}\, l(\bar{y}_{u},\mathrm{p}_{u}), \tag{1} \end{array}
73
+ $$
74
+
75
+ where $l(y_{i}, \mathrm{p}_{i}) = -\log p_{i, y_{i}}$ is the multi-class cross entropy loss and we fix $\lambda = 1$ in this paper. Finally, we replace the teacher model with the student model and iterate the above procedure until convergence.
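+ 
+ To make this pipeline concrete, below is a minimal sketch of the generic loop (our own illustration, not the paper's released code); `train_fn` and `predict_fn` are hypothetical callbacks standing in for any GCN trainer and its softmax output:
+ 
+ ```python
+ def self_train(X, A, labeled_idx, labels, unlabeled_idx,
+                train_fn, predict_fn, tau=0.9, stages=5):
+     """Generic graph self-training: pseudo-label high-confidence nodes.
+ 
+     train_fn(X, A, idx, y)  -> a fitted GCN (teacher/student)
+     predict_fn(model, X, A) -> (N, C) array of softmax probabilities
+     """
+     idx, y = list(labeled_idx), list(labels)
+     model = train_fn(X, A, idx, y)                  # teacher f_theta
+     for _ in range(stages):
+         probs = predict_fn(model, X, A)             # p_i for every node
+         conf = probs.max(axis=1)                    # r_i = max_j p_{i,j}
+         known = set(idx)
+         picked = [u for u in unlabeled_idx
+                   if conf[u] > tau and u not in known]
+         idx += picked                               # augment V_L with S_U
+         y += [int(probs[u].argmax()) for u in picked]   # pseudo labels
+         model = train_fn(X, A, idx, y)              # student replaces teacher
+     return model
+ ```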
76
+
77
+ ![](images/7ece0c81647d63c6d2cc134f70a23623d2eab1593c07d21f2326b8151e4ff4e5.jpg)
+ Figure 1: (a): Relationship between confidence and information gain on Cora. (b): Visualization of embeddings on Cora.
+ 
+ Information Gain As can be seen in Eq. 1, self-training on graphs will exploit the unlabeled data to train the whole model. Here, we aim to measure how an unlabeled node contributes to the model optimization in a principled way, i.e., information gain. Information gain usually measures the reduction in information given a random variable, where information is generally calculated by Shannon's entropy [6]. We utilize the information gain here to seek the node $v_u$ that carries the most information about the parameters $\theta$ of the model posterior and could reduce the number of possible parameter hypotheses maximally fast. We refer to this type of information gain as information gain about model parameters [22]. Formally, given a node $v_u$, the information gain about model parameters is defined as $\mathbb{B}_u$, which could be calculated as follows:
83
+
84
+ $$
85
+ \mathbb{B}_{u}\left(y_{u},\theta \mid \mathrm{x}_{u},\mathrm{A},\mathcal{G}\right) = \mathbb{H}\left[\mathbb{E}_{P(\theta\mid\mathcal{G})}\left[y_{u}\mid \mathrm{x}_{u},\mathrm{A};\theta\right]\right] - \mathbb{E}_{P(\theta\mid\mathcal{G})}\left[\mathbb{H}\left[y_{u}\mid \mathrm{x}_{u},\mathrm{A};\theta\right]\right], \tag{2}
86
+ $$
87
+
88
+ where $\mathbb{H}(\cdot)$ denotes Shannon's entropy and $P(\theta | \mathcal{G})$ is the distribution of the model posterior. The first term measures the information of the model parameters under the posterior, while the second term captures the information of the model parameters given an additional node $v_u$. Obviously, by calculating the difference between the two terms above, $\mathbb{B}_u$ can measure how much information $v_u$ can bring to learning the model parameters $\theta$.
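+ 
+ As a toy sanity check of Eq. 2 (with made-up numbers for a two-class node): if two posterior samples predict $(0.9, 0.1)$ and $(0.1, 0.9)$, the averaged prediction is $(0.5, 0.5)$ with entropy $\ln 2 \approx 0.693$ nats, while each individual sample has entropy $\approx 0.325$ nats, so $\mathbb{B}_u \approx 0.368$: the samples disagree, and observing $v_u$ is informative about $\theta$. If instead both samples predict $(0.9, 0.1)$, the two terms coincide and $\mathbb{B}_u = 0$, however confident the prediction is.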
89
+
90
+ # 3 EMPIRICAL ANALYSIS
91
+
92
+ In this section, we conduct a series of empirical analyses to examine whether current graph self-training approaches adopt a principled way to leverage unlabeled data for semi-supervised node classification.
93
+
94
+ Empirical Analysis of Confidence To better understand the capacity of high-confidence nodes in current self-training approaches, we closely examine how much additional information these nodes can bring to the model based on information gain. We first visualize the relationship between confidence and information gain in Fig. 1(a), where the x-axis is the confidence while the y-axis is the information gain, and the blue and orange dots respectively represent nodes with correct and incorrect predictions. From Fig. 1(a) we can observe a negative correlation, implying that existing graph self-training methods only focus on easy nodes (nodes with low information gain) and confidence may be cheating as a result. Essentially, such a "cheating" phenomenon lies in the worthless optimization for a more crisp decision boundary. Specifically, as shown in Fig. 1(b), on the Cora dataset, we visualize the node embeddings at the last layer of the standard GCN before softmax using the $t$-SNE [29] algorithm, where a darker dot represents a node with lower information gain. From the plots, we find that most of the easy nodes (i.e., with low information gain) are far from the decision boundary. Whereas, these nodes are always emphasized by current self-training methods on graphs [20, 28, 38] by virtue of their high confidence. That is, these methods are "cheated" by confidence in this way.
95
+
96
+ Empirical Analysis of Distribution Shift Furthermore, we investigate what will happen when self-training has been cheated by confidence. As an illustrative example, we randomly generate 500 nodes (blue) following a two-dimensional Gaussian distribution $\mathcal{N}(0, 0, 0.3, 0.3, 0)$ to represent labeled nodes in one class, and another 4000 nodes (grey) following the distribution of concentric circles [1] to represent labeled nodes belonging to other classes, as shown in Fig. 2(a). Furthermore, following the common self-training setting, a large number of unlabeled nodes still exist in the dataset, but for clarity, we omit them in the figure. In line with the core idea of current self-training methods, for the "blue" class, unlabeled nodes around the center are pseudo-labeled for self-training since these nodes have high confidence (a.k.a., far from the decision boundary). During iteration, as shown in Fig. 2(b), the data distribution will become sharper and sharper since nodes far from the decision boundary are paid disproportionate attention, and thus the unsatisfying Distribution Shift phenomenon between the original and augmented dataset indeed appears.
+ 
+ ![](images/b36bf3ec004563ae4d6b33e0b1807c8607453b66c230df01646faf622741ba85.jpg)
+ (a) $P_{pop}$
+ 
+ ![](images/815f6f1447716c1a1f9ccdbaaa21f06ba5f9990c5556529dc78cc384b2f76ed4.jpg)
+ (b) $P_{st}$
+ 
+ ![](images/16d958c790034270ed6997f04ece5b4178bdaa2d5753ce54b3a6ea8a976cabc3.jpg)
+ (c) ratio of $P_{pop}$ and $P_{st}$
+ 
+ Figure 2: Visualization of labeled nodes under the ideal condition. ($P_{pop}$: distribution before self-training, $P_{st}$: distribution after self-training)
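+ 
+ For readers who wish to reproduce this toy setup, a minimal sketch is given below (our own reconstruction, not the paper's script; the `noise` and `factor` values for the circles are arbitrary choices):
+ 
+ ```python
+ import numpy as np
+ from sklearn.datasets import make_circles
+ 
+ rng = np.random.default_rng(0)
+ # 500 "blue" nodes ~ 2D Gaussian, mean (0, 0), std (0.3, 0.3), no correlation
+ blue = rng.normal(loc=0.0, scale=0.3, size=(500, 2))
+ # 4000 "grey" nodes arranged as concentric circles
+ grey, _ = make_circles(n_samples=4000, noise=0.05, factor=0.5, random_state=0)
+ ```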
109
+
110
+ # 4 THE DR-GST FRAMEWORK
111
+
112
+ In this section, we elaborate the proposed DR-GST, a novel self-training framework aiming at recovering the shifted distribution.
113
+
114
+ # 4.1 Information Gain Weighted Loss Function Towards Distribution Shift
115
+
116
+ We start with the formulation of the self-training task by analyzing the corresponding loss functions. Specifically, assuming that the original labeled dataset follows the population distribution $P_{pop}$ , given a classifier $f_{\theta}$ parameterized by $\theta$ , the best parameter set $\theta$ could be obtained via minimizing the following loss function:
117
+
118
+ $$
119
+ \mathcal {L} _ {p o p} = \mathbb {E} _ {\left(v _ {i}, y _ {i}\right) \sim P _ {p o p} (\mathcal {V}, \mathcal {Y})} l \left(y _ {i}, \mathrm {p} _ {i}\right). \tag {3}
120
+ $$
121
+
122
+ Similarly, under the distribution shift case caused by self-training, the loss function can be represented as
123
+
124
+ $$
125
+ \begin{array}{l} \mathcal{L}_{st} = \frac{|\mathcal{V}_{L}|}{|\mathcal{V}_{L}\cup\mathcal{S}_{U}|}\, \mathbb{E}_{(v_{i},y_{i})\sim P_{pop}(\mathcal{V},\mathcal{Y})}\, l(y_{i},\mathrm{p}_{i}) \\ \qquad + \frac{|\mathcal{S}_{U}|}{|\mathcal{V}_{L}\cup\mathcal{S}_{U}|}\, \mathbb{E}_{(v_{u},y_{u})\sim P_{st}(\mathcal{V},\mathcal{Y})}\, l(\bar{y}_{u},\mathrm{p}_{u}), \tag{4} \end{array}
126
+ $$
127
+
128
+ where $P_{st}$ represents the shifted distribution of the augmented dataset.
129
+
130
+ Generally, the distribution shift could lead to terrible generalization during evaluation, and thus severely threatens the capacity of graph self-training. Therefore, it is ideal to optimize $f_{\theta}$ with the loss function $\mathcal{L}_{pop}$ under the population distribution rather than $\mathcal{L}_{st}$ under the distribution shift case. However, only $\mathcal{L}_{st}$ is available in practice. To close the gap, we show the following theorem.
133
+
134
+ THEOREM 4.1. Given $\mathcal{L}_{pop}$ and $\mathcal{L}_{st}$ defined in Eq. 3 and Eq. 4, assuming that $\bar{y}_u = y_u$ for each pseudo-labeled node $v_u \in S_U$ , then $\mathcal{L}_{st} = \mathcal{L}_{pop}$ holds true if $\mathcal{L}_{st}$ can be written with an additional weight coefficient $\gamma_u = \frac{P_{pop}(v_u, y_u)}{P_{st}(v_u, y_u)}$ as follows:
135
+
136
+ $$
137
+ \begin{array}{l} \mathcal{L}_{st} = \frac{|\mathcal{S}_{U}|}{|\mathcal{V}_{L}\cup\mathcal{S}_{U}|}\, \mathbb{E}_{(v_{u},y_{u})\sim P_{st}(\mathcal{V},\mathcal{Y})}\, \gamma_{u}\, l(\bar{y}_{u},\mathrm{p}_{u}) \\ \qquad + \frac{|\mathcal{V}_{L}|}{|\mathcal{V}_{L}\cup\mathcal{S}_{U}|}\, \mathbb{E}_{(v_{i},y_{i})\sim P_{pop}(\mathcal{V},\mathcal{Y})}\, l(y_{i},\mathrm{p}_{i}). \tag{5} \end{array}
138
+ $$
139
+
140
+ Proof. Please refer to Appendix A.1.1.
141
+
142
+ ![](images/5dbf9f86a4d3b64c22e89b75fb630fcfa78551c1c2a5f312e46347f58ad49659.jpg)
143
+
144
+ Based on Theorem 4.1, we can find that our desired $\mathcal{L}_{pop}$ can be written as the available $\mathcal{L}_{st}$ only if a coefficient $\gamma_{u}$ is added to $\mathcal{L}_{st}$ . In other words, the distribution shift issue could be addressed by optimizing $f_{\theta}$ with available $\mathcal{L}_{st}$ weighted by $\gamma_{u}$ (in Eq. 5). However, it should be noted that the population distribution $P_{pop}$ in $\mathcal{L}_{st}$ is generally intractable, which means that $\gamma_{u}$ cannot be accurately calculated.
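+ 
+ The key step behind Theorem 4.1 is the standard importance-sampling identity; a one-line sketch (assuming $\bar{y}_u = y_u$, as in the theorem) is:
+ 
+ $$
+ \mathbb{E}_{(v_u,y_u)\sim P_{st}(\mathcal{V},\mathcal{Y})}\left[\gamma_u\, l(\bar{y}_u,\mathrm{p}_u)\right] = \sum_{(v_u,y_u)} P_{st}(v_u,y_u)\, \frac{P_{pop}(v_u,y_u)}{P_{st}(v_u,y_u)}\, l(\bar{y}_u,\mathrm{p}_u) = \mathbb{E}_{(v_u,y_u)\sim P_{pop}(\mathcal{V},\mathcal{Y})}\left[l(y_u,\mathrm{p}_u)\right].
+ $$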
145
+
146
+ To this end, we propose to build a bridge between $\gamma_{u}$ and the information gain, which is motivated as follows. Recalling the data distributions shown in Fig. 2(a) and Fig. 2(b), we could formally represent the former as $P_{pop}$ and the latter as $P_{st}$. We visualize the desired weight coefficient $\gamma_{u} = \frac{P_{pop}(v_{u},y_{u})}{P_{st}(v_{u},y_{u})}$ for each pseudo-labeled node $v_{u}$ in Fig. 2(c) for a better understanding of its changing trend, where a darker area means a larger $\gamma_{u}$. Obviously, we observe that $\gamma_{u}$ becomes smaller when getting closer to the center area (a.k.a., far away from the decision boundary), which is consistent with the changing trend of the information gain. This finding inspires us to adopt the information gain to approximate $\gamma_{u}$.
147
+
148
+ # 4.2 Information Gain Estimation on Graphs
149
+
150
+ Next, we elaborate the estimation of the information gain for each node $v_{u}$ in the graph. As mentioned in Eq. 2, the distribution of the model posterior $P(\theta|\mathcal{G})$ is desired for calculating the information gain, but it is intractable in practice, and always computationally expensive for traditional Bayesian neural networks [2, 11, 15]. Instead, we could shift attention towards dropout [9] and dropedge [25], two regularization techniques for preventing over-fitting and over-smoothing in GCNs, both of which could be interpreted as an approximation of $P(\theta|\mathcal{G})$ [13]. Consequently, we propose to estimate the information gain assisted with dropout and dropedge (a.k.a., dropout and dropedge variational inference), which takes into account both features and the network topology in our unified framework DR-GST. For distinction, we refer to DR-GST with dropout variational inference as $\mathrm{DR - GST}_{do}$ and that with dropedge variational inference as $\mathrm{DR - GST}_{de}$.
151
+
152
+ 4.2.1 Dropout Variational Inference. Specifically, given an $L$-layer GCN model $f_{\theta}$, its $l$-th layer output $\mathrm{H}^{(l)} \in \mathbb{R}^{|\mathcal{V}| \times D_l}$ can be obtained by
153
+
154
+ $$
155
+ \mathrm {H} ^ {(l)} = \sigma (\mathfrak {N} (\mathrm {A}) \mathrm {H} ^ {(l - 1)} \mathrm {W} ^ {(l - 1)}), \tag {6}
156
+ $$
157
+
158
+ where $\mathfrak{N}(\cdot)$ represents the normalizing operator, $\mathrm{W}^{(l - 1)}\in \mathbb{R}^{D_{l - 1}\times D_l}$ is the $(l-1)$-th layer weight matrix, $\sigma (\cdot)$ is the activation function, and $\mathrm{H}^{(1)} = \mathrm{X}\in \mathbb{R}^{|\mathcal{V}|\times D_o},\ \theta = \{\mathrm{W}^{(l)}\}_{l = 1}^L$. Dropout randomly masks features of nodes in the graph through drawing from an independent Bernoulli random variable. Formally, the $l$-th layer output of $f_{\theta}$ with dropout can be written as:
161
+
162
+ $$
163
+ \mathrm {H} ^ {(l)} = \sigma (\mathfrak {N} (\mathrm {A}) (\mathrm {H} ^ {(l - 1)} \odot \mathrm {Z} ^ {(l - 1)}) \mathrm {W} ^ {(l - 1)}), \tag {7}
164
+ $$
165
+
166
+ where each element of $\mathrm{Z}^{(l-1)} \in \{0,1\}^{|\mathcal{V}| \times D_{l - 1}}$ is a sample of a Bernoulli random variable, representing whether or not the corresponding feature in $\mathrm{H}^{(l - 1)}$ is set to zero.
167
+
168
+ Such Bernoulli random sampling on features can also be treated as a sample from $P(\theta|\mathcal{G})$ [9], thus we can perform $T$-times Monte-Carlo sampling (referred to as Monte-Carlo dropout, MC-dropout) during inference to estimate $P(\theta|\mathcal{G})$. At each time $t$, a probability vector $\tilde{\mathrm{p}}_u^t = \tilde{\mathrm{p}}^t(y_u|x_u, A; \tilde{\theta}_t)$ can be obtained by performing a forward pass under such a sampled weight $\tilde{\theta}_t$, i.e., $\tilde{\mathrm{p}}_u^t = f_{\tilde{\theta}^t}(x_u, A)$.
169
+
170
+ However, from the perspective of the computational overhead and practical performance, we only conduct dropout on the last layer during MC-dropout. In other words, the probability vector $\tilde{\mathrm{p}}_u^t\in \tilde{\mathrm{P}}^t = f_{\tilde{\theta}^t}(\mathrm{X},\mathrm{A})$ at each time $t$ can be obtained by:
171
+
172
+ $$
173
+ \tilde{\mathrm{P}}^{t} = \sigma\big(\mathfrak{N}(\mathrm{A})\,\big(\mathrm{Z}^{(t)} \odot \sigma(\mathfrak{N}(\mathrm{A}) \cdots \sigma(\mathfrak{N}(\mathrm{A})\,\mathrm{X}\mathrm{W}^{(1)}) \cdots \mathrm{W}^{(L-1)})\big)\,\mathrm{W}^{(L)}\big). \tag{8}
174
+ $$
175
+
176
+ 4.2.2 Dropedge Variational Inference. Dropedge variational inference proceeds in a similar way to dropout variational inference, but imposes the randomness on the network topology instead.
177
+
178
+ Specifically, the $l$-th layer output of $f_{\theta}$ with dropedge can be written as:
179
+
180
+ $$
181
+ \mathrm {H} ^ {(l)} = \sigma (\mathfrak {N} (\mathrm {A} \odot \mathrm {Z} ^ {(l - 1)}) \mathrm {H} ^ {(l - 1)} \mathrm {W} ^ {(l - 1)}), \tag {9}
182
+ $$
183
+
184
+ where each element of $\mathrm{Z}^{(l-1)} \in \{0,1\}^{|\mathcal{V}| \times |\mathcal{V}|}$ is also a sample of a Bernoulli random variable, representing whether or not the corresponding edge in $\mathrm{A}$ is removed.
185
+
186
+ Similarly, we only conduct dropedge on the last layer and perform $T$-times Monte-Carlo sampling (referred to as Monte-Carlo dropedge) based on dropedge, where at each time $t$ the probability vector $\tilde{\mathrm{p}}_u^t \in \tilde{\mathrm{P}}^t = f_{\tilde{\theta}^t}(\mathrm{X}, \mathrm{A})$ is obtained by
187
+
188
+ $$
189
+ \tilde{\mathrm{P}}^{t} = \sigma\big(\mathfrak{N}(\mathrm{A} \odot \mathrm{Z}^{(t)})\, \sigma(\mathfrak{N}(\mathrm{A}) \cdots \sigma(\mathfrak{N}(\mathrm{A})\,\mathrm{X}\mathrm{W}^{(1)}) \cdots \mathrm{W}^{(L-1)})\,\mathrm{W}^{(L)}\big). \tag{10}
190
+ $$
191
+
192
+ 4.2.3 Information Gain Estimation. With such probability vector $\tilde{\mathbf{p}}_u^t$ obtained by Eq. 8 or Eq. 10, we can calculate the prediction distribution $\mathbf{p}_u^G$ by averaging all the $\tilde{\mathbf{p}}_u^t$ :
193
+
194
+ $$
195
+ \mathrm {p} _ {u} ^ {\mathcal {G}} = \mathrm {p} \left(y _ {u} | \mathrm {x} _ {u}, \mathrm {A}, \mathcal {G}\right) = \frac {1}{T} \sum_ {t = 1} ^ {T} \tilde {\mathrm {p}} _ {u} ^ {t}, \tilde {\theta} _ {t} \sim P (\theta | \mathcal {G}), \tag {11}
196
+ $$
197
+
198
+ and thus the information gain $\mathbb{B}_u$ can be calculated by:
199
+
200
+ $$
201
+ \mathbb {B} _ {u} \left(y _ {u}, \theta | \mathrm {x} _ {u}, \mathrm {A}, \mathcal {G}\right) = - \sum_ {d = 1} ^ {D} p _ {u, d} ^ {\mathcal {G}} \log p _ {u, d} ^ {\mathcal {G}} + \frac {1}{T} \sum_ {d = 1} ^ {D} \sum_ {t = 1} ^ {T} \tilde {p} _ {u, d} ^ {t} \log \tilde {p} _ {u, d} ^ {t}. \tag {12}
202
+ $$
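+ 
+ In code, the estimators of Eq. 11 and Eq. 12 reduce to a handful of array operations. A minimal NumPy sketch (our own illustration), where `mc_probs` stacks the $T$ stochastic forward passes $\tilde{\mathrm{p}}_u^t$ obtained by leaving dropout or dropedge on at inference:
+ 
+ ```python
+ import numpy as np
+ 
+ def information_gain(mc_probs, eps=1e-12):
+     """mc_probs: (T, N, D) array of T Monte-Carlo softmax outputs.
+     Returns the per-node information gain B_u of Eq. 12, shape (N,)."""
+     mean_p = mc_probs.mean(axis=0)                     # Eq. 11: p_u^G
+     h_mean = -(mean_p * np.log(mean_p + eps)).sum(-1)  # entropy of the mean
+     mean_h = -(mc_probs * np.log(mc_probs + eps)).sum(-1).mean(axis=0)
+     return h_mean - mean_h                             # Eq. 12
+ ```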
203
+
204
+ Finally, we weight the loss function with the above information gain after normalization:
205
+
206
+ $$
207
+ \begin{array}{l} \mathcal {L} _ {s t} = \frac {| \mathcal {S} _ {U} |}{| \mathcal {V} _ {L} \cup \mathcal {S} _ {U} |} \mathbb {E} _ {(v _ {u}, y _ {u}) \sim P _ {s t} (\mathcal {V}, \mathcal {Y})} \bar {\mathbb {B}} _ {u} l (\bar {y} _ {u}, \mathrm {p} _ {u}) \\ + \frac {\left| \mathcal {V} _ {L} \right|}{\left| \mathcal {V} _ {L} \cup \mathcal {S} _ {U} \right|} \mathbb {E} _ {\left(v _ {i}, y _ {i}\right) \sim P _ {p o p} (\mathcal {V}, \mathcal {Y})} l \left(y _ {i}, p _ {i}\right) \tag {13} \\ \end{array}
208
+ $$
209
+
210
+ where $\bar{\mathbb{B}}_u = \frac{\mathbb{B}_u}{\beta \cdot \frac{1}{|\mathcal{S}_U|} \sum_i \mathbb{B}_i}$ .
211
+
212
+ Here, we can tune the balance coefficient $\beta$ to recover the population distribution (i.e., $\mathcal{L}_{st} \approx \mathcal{L}_{pop}$ ) as much as possible.
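+ 
+ Putting Eq. 13 into code, the weighting is just a per-node rescaling of the cross entropy on pseudo-labeled nodes. A minimal sketch (our own illustration, reusing `information_gain` from above; index arguments are assumed to be NumPy integer arrays):
+ 
+ ```python
+ import numpy as np
+ 
+ def weighted_st_loss(log_probs, y, labeled_idx, pseudo_idx, gain, beta=1.0):
+     """log_probs: (N, D) log-softmax outputs; y: (N,) true/pseudo labels.
+     Implements Eq. 13: plain CE on V_L, gain-weighted CE on S_U."""
+     b_bar = gain[pseudo_idx] / (beta * gain[pseudo_idx].mean())  # normalized B_u
+     ce = lambda idx, w=1.0: -(w * log_probs[idx, y[idx]]).mean()
+     n_l, n_u = len(labeled_idx), len(pseudo_idx)
+     return (n_l * ce(labeled_idx) + n_u * ce(pseudo_idx, b_bar)) / (n_l + n_u)
+ ```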
213
+
214
+ # 4.3 Improving Qualities of Pseudo Labels via Loss Correction
215
+
216
+ Till now, we have addressed the distribution shift issue with an information gain weighted loss function, where more attention is paid to nodes with high information gain rather than high confidence. Unfortunately, such a training pipeline still carries hidden risks. Specifically, considering that pseudo labels of hard nodes are more likely to be incorrect, as shown in Fig. 1(a), and our DR-GST focuses more on hard nodes, the impact of incorrectly pseudo-labeled nodes will be enlarged and may even mislead the learning of GCNs. Previous works generally filter out these low-quality nodes with collaborative scoring [20, 28] or prefabricated assumptions [38] in a relatively coarse-grained manner, where abundant nodes with high information gain are discarded in advance. Instead, motivated by studies on learning with noisy labels [10, 23, 27], we propose to incorporate a loss correction strategy into graph self-training. In brief, DR-GST corrects the predictions of the student model in each iteration, so as to eliminate the negative impact of misleading pseudo labels from the teacher model.
217
+
218
+ Specifically, given a student model $f_{\bar{\theta}}$ trained by pseudo labels, the loss correction assumes there is a model $f_{\theta^*}$ trained by ground-truth labels and a transition matrix $T$ such that $f_{\bar{\theta}}$ can be represented by $f_{\bar{\theta}} = Tf_{\theta^*}$, as shown in Fig. 3, where each element of $T \in \mathbb{R}^{c \times c}$ is a transition probability from the ground-truth label to the pseudo label, i.e., $T_{kj} = P(\bar{Y} = j|Y = k)$ and $c$ is the number of classes. With such a transition matrix, any model trained by pseudo labels is equivalent to one trained by ground-truth labels. We prove this equivalence in the following proposition.
219
+
220
+ PROPOSITION 4.2. Given a model $f_{\bar{\theta}}$ trained by pseudo labels and a model $f_{\theta^*}$ trained by ground-truth labels, assuming that there exists a transition matrix $T$ such that the equation $f_{\bar{\theta}}(\mathrm{x}_u, \mathrm{A}) = \mathrm{T}f_{\theta^*}(\mathrm{x}_u, \mathrm{A})$ holds for each node $v_u$ , then $\bar{\theta} = \theta^*$ if $T$ is a permutation matrix under cross entropy (CE) loss or $T$ is an arbitrary non-zero matrix under mean square error (MSE) loss.
221
+
222
+ Proof. Please refer to Appendix A.1.2.
223
+
224
+ Based on Proposition 4.2, ideally, we can train the student model regardless of the quality of labels, and recover $f_{\theta^*}$ with $\mathrm{T}$. Specifically, as shown in Fig. 3, for each node $v_i \in \{\mathcal{V}_L \cup \mathcal{S}_U\}$ with its feature vector $\mathbf{x}_i$, we first feed it into the student model and multiply the output with $\mathrm{T}$ to get $f_{\overline{\theta}}(\mathbf{x}_i, \mathbf{A})$. Then we use $f_{\overline{\theta}}(\mathbf{x}_i, \mathbf{A})$ to optimize the student model according to Eq. 13. Finally, at inference, we can treat the student model as $f_{\theta^*}$. Please note that the transition matrix $T$ is pre-computed and not updated during optimization of the student model.
225
+
226
+ without loss correction: $\mathrm{x}_i \rightarrow \text{student model} \rightarrow f_{\bar{\theta}}(\mathrm{x}_i, \mathrm{A}) \rightarrow \mathcal{L}(\mathrm{A}, \mathrm{X}, \mathcal{Y}_L)$
+ with loss correction: $\mathrm{x}_i \rightarrow \text{student model} \rightarrow f_{\theta^*}(\mathrm{x}_i, \mathrm{A}) \rightarrow \mathrm{T} \rightarrow f_{\bar{\theta}}(\mathrm{x}_i, \mathrm{A}) \rightarrow \mathcal{L}(\mathrm{A}, \mathrm{X}, \mathcal{Y}_L)$
229
+
230
+ Figure 3: An illustration of loss correction.
231
+
232
+ Next, we illustrate the computation of the transition matrix $T$. Note that for each node $v_{i} \in \mathcal{V}_{L}$ with the ground-truth label $y_{i} = k$, the probability $P(Y = k|X = x_{i})$ should be 1 since we definitely know its label to be $k$. Therefore, given the output probability $p_{kj} = f_{\bar{\theta}}(\mathrm{x}_i,\mathrm{A})_j$ of class $j$, we have
233
+
234
+ $$
235
+ \begin{array}{l} p _ {k j} = P (\bar {Y} = j | X = \mathrm {x} _ {i}) = \sum_ {m = 1} ^ {c} P (\bar {Y} = j | Y = m, X = \mathrm {x} _ {i}) P (Y = m | X = \mathrm {x} _ {i}) \\ = P \left(\bar {Y} = j \mid Y = k, X = \mathrm {x} _ {i}\right) \cdot 1 + 0 + \dots + 0 = T _ {k j} \left(\mathrm {x} _ {i}\right) = T _ {k j}. \tag {14} \\ \end{array}
236
+ $$
237
+
238
+ In other words, the output probability vector $f_{\bar{\theta}}(\mathrm{x}_i,\mathrm{A})$ of each node $v_{i}$ with its ground-truth label $k$ is the $k$-th row of $\mathrm{T}$, where $\bar{\theta}$ means such a model is trained with the augmented dataset $\mathcal{V}_L\cup \mathcal{S}_U$.
239
+
240
+ Technically, we first train a student model $f_{\bar{\theta}}$ without loss correction using the augmented dataset $\mathcal{V}_L \cup \mathcal{S}_U$ , then update $T$ according to $p_{kj} = f_{\bar{\theta}}(\mathrm{x}_i, \mathrm{A})_j$ , and finally re-train a student model from scratch with loss correction to obtain $f_{\theta^*}$ .
241
+
242
+ Considering that there are multiple nodes belonging to class $k$ in $\mathcal{V}_L$ , we propose the following optimization problem to learn $T$ instead:
243
+
244
+ $$
245
+ \arg\min_{\mathrm{T}} \sum_{k = 1}^{c} \sum_{i = 1}^{N_{k}^{(L)}} \left\| \mathrm{T}_{k,:} - f_{\bar{\theta}}(\mathrm{x}_{i}, \mathrm{A}) \right\|^{2} + \left\| \mathrm{T}\mathrm{T}^{\mathsf{T}} - \mathrm{I} \right\|^{2}, \tag{15}
246
+ $$
247
+
248
+ where $N_k^{(L)}$ is the number of nodes belonging to class $k$ in $\mathcal{V}_L$ and $\mathrm{I}$ is an identity matrix. Since the improved CE loss is utilized as the loss function in this paper, as mentioned in Eq. 1 and Eq. 13, we append the regularization term $\left\| \mathrm{TT}^{\mathsf{T}} - \mathrm{I}\right\|^{2}$ to guide $\mathrm{T}$ to approximate a permutation matrix, which is derived from Proposition 4.2 under the CE loss. Moreover, we initialize $\mathrm{T}$ with the identity matrix $\mathrm{I}$ at the very beginning.
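+ 
+ The two pieces of this strategy, estimating T from the labeled nodes and applying it as a forward correction during re-training, can be sketched as follows (our own illustration; averaging the student outputs per class minimizes the first term of Eq. 15 exactly, and we skip the orthogonality regularizer for brevity):
+ 
+ ```python
+ import numpy as np
+ 
+ def estimate_T(probs, labels, num_classes):
+     """Row k of T = mean output of f_theta_bar over labeled nodes of class k."""
+     T = np.eye(num_classes)                 # initialized with the identity I
+     for k in range(num_classes):
+         rows = probs[labels == k]           # outputs p_kj for class-k nodes
+         if len(rows) > 0:
+             T[k] = rows.mean(axis=0)
+     return T
+ 
+ # Forward correction: P(Y_bar = j | x) = sum_k T_kj * P(Y = k | x),
+ # i.e. probs_bar = probs_star @ T for an (N, c) batch of probabilities.
+ def corrected_probs(probs_star, T):
+     return probs_star @ T
+ ```
+ 
+ During re-training, the cross entropy in Eq. 13 is evaluated on the corrected probabilities, while inference uses the uncorrected student outputs as $f_{\theta^*}$.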
249
+
250
+ # 4.4 Overview of DR-GST
251
+
252
+ Till now, we have elaborated our proposed DR-GST framework, which addresses both the distribution shift and low-quality pseudo labels with the help of information gain and loss correction. We summarize it in Algorithm 1 and further analyze its time complexity in Appendix A.2.
253
+
254
+ Given a graph $\mathcal{G} = (\mathcal{V},\mathcal{E},\mathrm{X})$ with its original labeled dataset $\mathcal{V}_{L}$, unlabeled dataset $\mathcal{V}_{U}$, adjacency matrix $\mathrm{A}$, as well as its label set $\mathcal{Y}_{L}$, we first train a teacher model $f_{\theta}$ on $\mathcal{V}_{L}$ to obtain the prediction $\bar{y}_u$ and the confidence $r_u$ for each unlabeled node $v_{u}\in \mathcal{V}_{U}$ at line 1, and then iterate the steps from line 3 to 9 until convergence, where we call each iteration a stage following [28]. Specifically, at line 3 we select the unlabeled nodes whose confidence $r_u$ is greater than a given threshold $\tau$ to obtain $\mathcal{S}_U$. Next, at line 4 we pseudo-label each node $v_{u}\in \mathcal{S}_{U}$ with $\bar{y}_u$ to augment $\mathcal{V}_{L}$. Then, at line 5 we calculate the information gain $\mathbb{B}_u$ according to dropout or dropedge variational inference in Section 4.2 and normalize it according to Eq. 13. With such information gain, we train a student model $f_{\bar{\theta}}$ at line 6 using the augmented dataset, where pseudo labels may be incorrect. Therefore, at line 7 we update the transition matrix $\mathrm{T}$ with the output probability vector of $f_{\bar{\theta}}$ for each node $v_{i} \in \mathcal{V}_{L}$ according to Eq. 15, and retrain the student model from scratch at line 8 with $f_{\bar{\theta}} = \mathrm{T}f_{\theta^{*}}$ to get $f_{\theta^{*}}$. Finally, we replace the teacher model $f_{\theta}$ with $f_{\theta^{*}}$ and repeat the above steps until convergence.
257
+
258
+ # Algorithm 1 The DR-GST Framework
259
+
260
+ Input: Graph $\mathcal{G} = (\mathcal{V},\mathcal{E},\mathrm{X})$, original labeled dataset $\mathcal{V}_{L}$, unlabeled dataset $\mathcal{V}_{U}$, adjacency matrix $\mathrm{A}$, label set $\mathcal{Y}_{L}$, transition matrix $\mathrm{T} = \mathrm{I}$
261
+
262
+ Output: Probability vector $\mathrm{p}_i$ for each node $v_{i}$
263
+
264
+ 1: Train a teacher model $f_{\theta}$ on $\mathcal{V}_L$ to obtain the prediction $\bar{y}_u$ and the confidence $r_u$ for each unlabeled node $v_u \in \mathcal{V}_U$ ;
265
+ 2: for each stage $k$ do
266
+ 3: Select part of unlabeled nodes according to $r_u$ to get $S_U$ ;
267
+ 4: Pseudo-label each node $v_{u} \in S_{U}$ with $\bar{y}_{u}$;
268
+ 5: Calculate the information gain $\mathbb{B}_u$ according to Eq. 12;
269
+ 6: Train a student model $f_{\bar{\theta}}$ without $T$ according to Eq. 13;
270
+ 7: Update T using $f_{\bar{\theta}}(\mathbf{x}_i, \mathrm{A})$ of $v_i \in \mathcal{V}_L$ according to Eq. 15;
271
+ 8: Retrain a student model from scratch according to Eq. 13 with $f_{\overline{\theta}} = \mathrm{T}f_{\theta^{*}}$ to get $f_{\theta^{*}}$ ;
272
+ 9: Replace the teacher model $f_{\theta}$ with the student model $f_{\theta^{*}}$ .
273
+ 10: end for
274
+ 11: return $\mathrm{p}_i = f_{\theta^*}(\mathrm{x}_i, \mathrm{A})$ in the final stage.
275
+
276
+ # 4.5 Theoretical Analysis
277
+
278
+ In this section, we theoretically analyze the influence factors on self-training from the perspective of gradient descent, and our theorem below demonstrates the rationality of the whole DR-GST framework.
279
+
280
+ THEOREM 4.3. Assuming that $||\nabla_{\theta}l(y_i,\mathrm{p}_i)||\leqslant \Psi$ for each node $v_{i}$, where $\Psi$ is a constant, given $\nabla_{\theta}\mathcal{L}_{pop}$ and $\nabla_{\theta}\mathcal{L}_{st}$, the gradients of $\mathcal{L}_{pop}$ and $\mathcal{L}_{st}$ w.r.t. the model parameters $\theta$, the following bound between $\nabla_{\theta}\mathcal{L}_{pop}$ and $\nabla_{\theta}\mathcal{L}_{st}$ holds:
281
+
282
+ $$
283
+ \begin{array}{l} \left\| \nabla_{\theta}\mathcal{L}_{pop} - \nabla_{\theta}\mathcal{L}_{st} \right\| \leqslant \frac{|\mathcal{S}_{U}|}{|\mathcal{V}_{L}\cup\mathcal{S}_{U}|}\, \Psi \Big( 2 \left\| P_{(v_{u},y_{u})\sim P_{pop}(\mathcal{V},\mathcal{Y})}\left(\bar{y}_{u}\neq y_{u}\right) \right\| \\ \qquad + \left\| P_{st}(\mathcal{V},\mathcal{Y}) - P_{pop}(\mathcal{V},\mathcal{Y}) \right\| \Big). \tag{16} \end{array}
284
+ $$
285
+
286
+ Proof. Please refer to Appendix A.1.3.
287
+
288
+ From Theorem 4.3 we can conclude that the performance of self-training is negatively related to the difference $||P_{st}(\mathcal{V},\mathcal{Y}) - P_{pop}(\mathcal{V},\mathcal{Y})||$ between the two distributions as well as the error rate $||P_{(v_u,y_u)\sim P_{pop}(\mathcal{V},\mathcal{Y})}(\bar{y}_u\neq y_u)||$ of pseudo labels. Meanwhile, we find our proposed DR-GST is a natural framework equipped with two designs to correspondingly address these issues in self-training: the information gain weighted loss function for distribution recovery and the loss correction strategy for improving the quality of pseudo labels. This analysis further demonstrates the rationality of the DR-GST framework from the theoretical perspective.
289
+
290
+ # 5 EXPERIMENT
291
+
292
+ In this section, we evaluate the effectiveness of the DR-GST framework on the semi-supervised node classification task with five widely used benchmark datasets from citation networks [3, 26] (i.e., Cora, Citeseer, Pubmed and CoraFull) and social networks [21] (i.e., Flickr). More detailed descriptions of the datasets are in Appendix A.3.1.
293
+
294
+ # 5.1 Experimental Setup
295
+
296
+ 5.1.1 Baselines. We compare our proposed DR-GST framework with two categories of baselines, including three representative GCNs (i.e., GCN [17], GAT [30], APPNP [18]) and three graph self-training frameworks (i.e., STs [20], M3S [28], ABN [38]). Note that STs includes four variants (i.e., Self-Training, Co-Training, Union and Intersection) in the original paper, and the best performance is reported in our experiments. The implementation of DR-GST and all the baselines can be seen in Appendix A.3.2. A more detailed description of the experimental environment can be seen in Appendix A.3.3.
297
+
298
+ 5.1.2 Evaluation Protocol. To more comprehensively evaluate our model, for all the datasets we use only a few (3, 5, 10, or 20) labeled nodes per class $(L / C)$ for the training set, following [20]. Specifically, in the setting $L / C = 20$, we follow the standard split [26] for Cora, Citeseer and Pubmed, and manually select 20 labeled nodes per class for CoraFull and Flickr considering the lack of a standard split. In the setting $L / C < 20$, we make 10 random splits for each $L / C$, where each random split randomly selects part of the nodes from the training set of $L / C = 20$. For all the methods and all the cases, we run 10 times and report the mean accuracy.
299
+
300
+ # 5.2 Overall Comparison on Node Classification
301
+
302
+ The performance of different methods on node classification is summarized in Table 1. We have the following observations.
303
+
304
+ - Our proposed DR-GST framework outperforms all the baselines by a considerable margin across most cases of all the datasets. The results demonstrate the effectiveness of DR-GST, which adopts a more principled mechanism to make use of unlabeled nodes in the graph to boost classification performance.
305
+ - As the number of labeled nodes decreases, the performance of the GCNs (i.e., GCN, GAT and APPNP) drops quickly. For clarity, we further illustrate the trend of accuracy w.r.t. $L / C$ in Fig. 4. The performance margin between DR-GST and the GCNs clearly grows as labeled nodes per class become fewer, which further implies the superior capacity of DR-GST for addressing labeled data scarcity in graph learning.
306
+ - Considering the two variants of DR-GST, we find that $\mathrm{DR\text{-}GST}_{do}$ performs better on Pubmed, CoraFull and Flickr, while $\mathrm{DR\text{-}GST}_{de}$ performs better on Cora and Citeseer. An intuitive explanation for this distinct performance is that different graphs place different emphasis on network topology versus feature information for the node classification task. Correspondingly, in the DR-GST framework, MC-dropedge performs information gain estimation via network topology while MC-dropout is based on feature information. This finding also sheds light on possible future work combining both topology and features to further enhance performance under our framework.
307
+
308
+ Table 1: Node classification results (%) (L/C: the number of labels per class; bold: best)
309
+
310
+ <table><tr><td>Dataset</td><td colspan="4">Cora</td><td colspan="4">Citeseer</td><td colspan="4">Pubmed</td><td colspan="4">CoraFull</td><td colspan="4">Flickr</td></tr><tr><td>L/C</td><td>3</td><td>5</td><td>10</td><td>20</td><td>3</td><td>5</td><td>10</td><td>20</td><td>3</td><td>5</td><td>10</td><td>20</td><td>3</td><td>5</td><td>10</td><td>20</td><td>3</td><td>5</td><td>10</td><td>20</td></tr><tr><td>GCN</td><td>64.52</td><td>69.55</td><td>78.03</td><td>81.56</td><td>51.39</td><td>61.34</td><td>68.39</td><td>71.64</td><td>66.04</td><td>71.25</td><td>75.88</td><td>79.31</td><td>41.83</td><td>49.12</td><td>55.67</td><td>60.69</td><td>37.69</td><td>40.64</td><td>48.04</td><td>51.74</td></tr><tr><td>GAT</td><td>67.19</td><td>69.45</td><td>76.38</td><td>82.24</td><td>55.19</td><td>59.40</td><td>67.61</td><td>72.00</td><td>67.85</td><td>68.41</td><td>72.42</td><td>78.38</td><td>36.44</td><td>46.70</td><td>52.45</td><td>57.97</td><td>20.02</td><td>24.90</td><td>33.27</td><td>37.06</td></tr><tr><td>APPNP</td><td>65.06</td><td>75.53</td><td>81.33</td><td>83.14</td><td>51.22</td><td>60.48</td><td>68.50</td><td>71.64</td><td>65.77</td><td>73.01</td><td>76.35</td><td>79.51</td><td>40.29</td><td>44.49</td><td>50.89</td><td>60.77</td><td>24.76</td><td>35.54</td><td>47.87</td><td>61.55</td></tr><tr><td>STs</td><td>70.68</td><td>75.60</td><td>80.35</td><td>82.89</td><td>56.29</td><td>65.59</td><td>74.17</td><td>74.36</td><td>69.82</td><td>73.77</td><td>77.68</td><td>81.02</td><td>43.44</td><td>51.16</td><td>58.40</td><td>61.70</td><td>35.21</td><td>43.25</td><td>48.23</td><td>52.99</td></tr><tr><td>M3S</td><td>64.24</td><td>71.02</td><td>78.93</td><td>82.78</td><td>50.07</td><td>63.28</td><td>74.54</td><td>74.72</td><td>68.76</td><td>69.21</td><td>70.72</td><td>81.34</td><td>42.77</td><td>49.75</td><td>57.43</td><td>61.40</td><td>35.33</td><td>39.02</td><td>47.62</td><td>51.87</td></tr><tr><td>ABN</td><td>66.39</td><td>73.07</td><td>78.73</td><td>81.79</td><td>54.30</td><td>64.27</td><td>69.90</td><td>72.81</td><td>59.17</td><td>71.40</td><td>75.26</td><td>79.09</td><td>43.38</td><td>48.39</td><td>55.88</td><td>60.62</td><td>35.13</td><td>41.62</td><td>47.01</td><td>52.10</td></tr><tr><td>DR-GSTdo</td><td>70.85</td><td>77.92</td><td>80.88</td><td>83.34</td><td>59.39</td><td>69.08</td><td>75.00</td><td>75.78</td><td>70.74</td><td>74.63</td><td>78.44</td><td>81.08</td><td>45.44</td><td>53.29</td><td>60.01</td><td>62.75</td><td>37.84</td><td>43.47</td><td>49.48</td><td>53.66</td></tr><tr><td>DR-GSTde</td><td>73.43</td><td>77.59</td><td>81.67</td><td>84.03</td><td>60.60</td><td>69.91</td><td>74.65</td><td>75.26</td><td>70.55</td><td>73.71</td><td>77.42</td><td>80.65</td><td>45.42</td><td>52.50</td><td>59.16</td><td>63.11</td><td>38.21</td><td>43.28</td><td>49.44</td><td>53.05</td></tr></table>
311
+
312
+ - Among the two categories of baselines, self-training frameworks (i.e., STs, M3S and ABN) can generally improve GCNs (i.e., GCN, GAT and APPNP), which indicates the usefulness of unlabeled data. Nevertheless, DR-GST still yields better performance thanks to the following two designs: 1) we pay more attention to nodes with high information gain rather than high confidence, so that the undesirable distribution shift issue is avoided; 2) we adopt a loss correction strategy, in which the quality of pseudo labels is improved for subsequent self-training.
313
+
314
+ # 5.3 In-depth Analysis of DR-GST
315
+
316
+ In this section, we conduct a series of analyses to better understand each component of DR-GST, as well as the key parameter choices.
317
+
318
+ 5.3.1 Ablation Study. As mentioned above, the performance of self-training theoretically hinges on the distribution gap and the quality of pseudo labels, both of which are naturally handled by our DR-GST framework with two corresponding designs: the information-gain weighted loss function and the loss correction module. To comprehensively understand their contributions to self-training on graphs, we prepare the following three variants of DR-GST:
319
+
320
+ - DR-GST-lc: DR-GST only with the loss correction module, i.e., the information-gain weight in the loss function is fixed to 1 for all the unlabeled nodes.
321
+ - DR-GST-ig: DR-GST only with the information gain weighted loss function.
322
+ - DR-GST-w/o: DR-GST without the above two designs.
323
+
324
+ The results for $\mathrm{DR\text{-}GST}_{do}$ and $\mathrm{DR\text{-}GST}_{de}$ are reported in Fig. 5 and Fig. 6, respectively. From the results we can see that the overall performance order is: DR-GST $>$ DR-GST-ig $>$ DR-GST-lc $>$ DR-GST-w/o. Three conclusions follow. Firstly, the best performance achieved by the complete DR-GST framework indicates the effectiveness of combining the two components. Secondly, the information gain weighted loss function and loss correction are both valuable modules for self-training on graphs; thus, ignoring them altogether (i.e., DR-GST-w/o) is not ideal. Thirdly, the information-gain weighted loss function plays the more vital role in our self-training framework, since DR-GST-lc generally does not perform as well as DR-GST-ig. In short, the above findings further verify the rationality of DR-GST from an empirical perspective.
325
+
326
+ 5.3.2 Parameter Study. Here, we investigate the sensitivity of two hyper-parameters (i.e., the threshold $\tau$ and the balance coefficient $\beta$) on the Cora and CoraFull datasets. Similar observations are also
327
+
328
+ ![](images/73b6d8c5511b4316ad61f755dd68abb4a31be50b5e7e5d87e355ca90cee18271.jpg)
329
+ Figure 4: The changing trends of accuracy w.r.t. $L / C$
330
+
331
+ ![](images/59396543faa6f44a44372b17755d5178af85ffde875e5900b3c6224070047515.jpg)
332
+
333
+ ![](images/664cdd3aaa286cb669e437231fae487ce381864322dbc18575b72647fff1f8ca.jpg)
334
+ Figure 5: Ablation study of DR-GST$_{do}$.
335
+
336
+ made on the other datasets. In particular, we report the performance of DR-GST $_{do}$ and DR-GST $_{de}$ respectively, and vary $L/C$ in $\{3, 5, 10\}$. For clear notation in the figures, we use "do-3" to denote DR-GST $_{do}$ with $L/C = 3$, and analogously for the other settings.
337
+
338
+ Analysis of threshold $\tau$ in self-training. We test the impact of the threshold $\tau$ in self-training, varying it from 0.40 to 0.70 for Cora and from 0.60 to 0.90 for CoraFull. The results are summarized in Fig. 7.
339
+
340
+ ![](images/01ebc9f132784cb9813c4827ceade1177ceb9572085d9ae554460dda6ce31b8f.jpg)
341
+
342
+ ![](images/57f0e43039ebd922fc51c7ede98b5b7e2ae984a16bcc540f6bc9ab4b7ad4925a.jpg)
343
+
344
+ ![](images/75b2115096073df9486d8d925a435183ff09094d31c05151889ad388dc3d7f42.jpg)
345
+ Figure 6: Ablation study of DR-GST$_{de}$.
346
+
347
+ ![](images/af562112ef3fcf862993d778cfd4d428baae35616d33401b9b07ee8da89aa755.jpg)
348
+ Figure 9: Visualization of learned embeddings for unlabeled nodes $((\mathbf{a})\sim (\mathbf{c}))$ and test nodes $((\mathbf{d})\sim (\mathbf{f}))$ on Cora at different stages during self-training.
349
+
350
+ ![](images/b437c977501356b52811eb8eb87319bce8073e679ec41fe58b98e707903680ed.jpg)
351
+ Figure 7: Impact of threshold $\tau$ .
352
+
353
+ ![](images/ed943db40cbb71949934c9b7c21215e15668d47fa50d5efc6fed8541816091cd.jpg)
354
+
355
+ ![](images/8aefdea9dfe87973a3e9afd16c1bed87a7cbb64263b3c746dd0eabbfcb56d9cf.jpg)
356
+ Figure 8: Impact of balance coefficient $\beta$ .
357
+
358
+ ![](images/e510d626d78c3e3e3940297e69b16a7b2be77dcb068501ae93b0704769e6d28b.jpg)
359
+
360
+ Generally speaking, the best performance is achieved when we set a smaller $\tau$, which is consistent with our earlier analysis that high-confidence unlabeled nodes contribute less.
361
+
362
+ Analysis of balance coefficient $\beta$. We then test the impact of the balance coefficient $\beta$ in Eq. 13, varying it from $1/3$ to $1$. The results are shown in Fig. 8. With the increase of $\beta$, i.e., with more attention paid to hard nodes, the performance shows a downward trend, further demonstrating the effectiveness of our design.
363
+
364
+ 5.3.3 Visualization. For a more intuitive understanding of the proposed information-gain-based DR-GST, we conduct a visualization study on the Cora dataset. Specifically, as shown in Fig. 9, we visualize the output embeddings of the student model at different stages of DR-GST. Figs. 9(a) to 9(c) show the visualization of unlabeled nodes, where a lighter dot represents a node endowed with a higher weight by information gain when calculating the loss
365
+
366
+ ![](images/e0ce5098eb96c45872ddd84acf4151dec06bb97cc5d9479883e0a07aa733f2e5.jpg)
367
+ (a) ${stage} = 1$
368
+
369
+ ![](images/cd99b2c24defb55ac89bfcf96eee79404611adf7c9c869f31e499d1ee69f1659.jpg)
370
+ (b) ${stage} = 2$
371
+
372
+ ![](images/49c93280f7af7148b80d9f3de23668fef11ea65e9d1ebdc5f70328a459551311.jpg)
373
+ (c) ${stage} = 3$
374
+
375
+ ![](images/d423ab2eb0da519430ba798747757842be20cd40e07a75c79be0a83bc73aa63a.jpg)
376
+ (d) $stage = 1$
377
+
378
+ ![](images/787300a8dabd46573399e45f281daaf0db34f6db12bca877f296e86ac3afa45d.jpg)
379
+ (e) $stage = 2$
380
+
381
+ ![](images/11794378f30f72dcb116ec51ed2014e8d6498562b666bdc904f81f8e32365ea6.jpg)
382
+ (f) $stage = 3$
383
+
384
+ function in Eq. 13. We can observe that at an earlier stage, DR-GST pays more attention to nodes close to the decision boundary, which is still indistinct at that point. As training progresses, the light nodes gradually vanish, implying that most of the information these nodes contain has been learned, leading to a crisper decision boundary. Figs. 9(d) to 9(f) show the visualization of test nodes, where different colors represent different classes. Apparently, the separability of the different classes of test nodes gradually improves, further demonstrating the effectiveness of DR-GST in optimizing the decision boundary.
385
+
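+ The qualitative picture above can be reproduced in a few lines; the following is a minimal sketch of our own (presumably via t-SNE [29], which the paper cites; `emb` is an assumed matrix of student-model output embeddings and `color` either class labels or information-gain weights):
+
+ ```python
+ from sklearn.manifold import TSNE
+ import matplotlib.pyplot as plt
+
+ def plot_embeddings(emb, color, title):
+     # project embeddings to 2-D and color dots by class or by weight
+     z = TSNE(n_components=2, init="pca", random_state=0).fit_transform(emb)
+     plt.scatter(z[:, 0], z[:, 1], c=color, s=6)
+     plt.title(title)
+     plt.show()
+ ```
+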
386
+ # 6 RELATED WORK
387
+
388
+ In line with the main focus of our work, we review the most related work in graph neural networks and self-training.
389
+
390
+ Graph Neural Networks Recent years have seen a surge of work on Graph Neural Networks (GNNs), which have achieved state-of-the-art performance in various tasks on graphs [33, 36]. Generally, current GNNs can be divided into two categories. The first category is spectral-based GNNs, which define the graph convolution operation in the spectral domain [4, 7]. The well-known GCN [17] simplifies graph convolutions by using the first-order approximation. Since then, plenty of studies have sprung up: SGC [32] further simplifies GCN by removing the nonlinearities between GCN layers, and [20] shows that GCNs smooth node features between neighbours. In comparison, the other category is spatial-based GNNs, mainly devoted to aggregating and transforming local information from the perspective of the spatial domain. GAT [30] assigns a learnt weight to each edge during aggregation, and [12] proposes a permutation-invariant aggregator for message passing. Moreover, there are many other graph neural models; we refer readers to recent surveys [34, 37] for a more comprehensive review.
391
+
392
+ Self-training Despite the success, GNNs typically require large amounts of labeled data, which is expensive and time-consuming. Self-training [16] is one of the earliest strategies addressing labeled data scarcity by making better use of abundant unlabeled data, and has shown remarkable performance on various tasks [14, 19, 22].
393
+
394
+ Recently, [20] proposes a graph-based self-training framework, demonstrating the effectiveness of self-training on graphs. Further, [28] utilizes DeepCluster [5] to filter out low-quality pseudo labels during self-training. CaGCN-st [31] argues that self-training under-performs due to generally overlooked low-confidence but high-accuracy predictions, and proposes a confidence-calibrated self-training framework. [38] proposes to select high-quality unlabeled nodes via an adaptive pseudo-labeling technique. [24] utilizes a margin-based prediction confidence to select unlabeled nodes, aiming at identifying the most confident labels. In summary, almost all graph self-training methods focus on improving the quality of pseudo labels by virtue of confidence, but none of them has considered the capability and limitations of such a selection criterion.
395
+
396
+ # 7 CONCLUSION
397
+
398
+ In this paper, we conduct a thorough empirical study of the capability and limitations of current self-training methods on graphs, and find, surprisingly, that they may be cheated by confidence and even suffer from the distribution shift issue, leading to unpromising performance. To this end, we propose a novel self-training framework, DR-GST, which not only addresses the distribution shift issue from the view of information gain, but is also equipped with a loss correction strategy for improving the quality of pseudo labels. Theoretical analysis and extensive experiments demonstrate the effectiveness of the proposed DR-GST. Moreover, our study gives the insight that confidence alone is not enough for self-training, which motivates an interesting direction for future work, i.e., exploiting more criteria for the selection of unlabeled nodes during self-training.
399
+
400
+ # 8 ACKNOWLEDGMENTS
401
+
402
+ This work is supported in part by the National Natural Science Foundation of China (No. U20B2045, 62192784, 62172052, 61772082, 62002029, U1936104), the Fundamental Research Funds for the Central Universities 2021RC28 and CCF-Ant Group Research Fund.
403
+
404
+ # REFERENCES
405
+
406
+ [1] Antonin Berthon, Bo Han, Gang Niu, Tongliang Liu, and Masashi Sugiyama. 2021. Confidence scores make instance-dependent label-noise learning possible. In ICML. 825-836.
407
+ [2] Charles Blundell, Julien Cornebise, Koray Kavukcuoglu, and Daan Wierstra. 2015. Weight uncertainty in neural network. In ICML. 1613-1622.
408
+ [3] Aleksandar Bojchevski and Stephan Gunnemann. 2018. Deep Gaussian Embedding of Graphs: Unsupervised Inductive Learning via Ranking. In ICLR.
409
+ [4] Joan Bruna, Wojciech Zaremba, Arthur Szlam, and Yann LeCun. 2014. Spectral Networks and Locally Connected Networks on Graphs. In ICLR.
410
+ [5] Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. 2018. Deep clustering for unsupervised learning of visual features. In ECCV. 132-149.
411
+ [6] Thomas M Cover. 1999. Elements of information theory. John Wiley & Sons.
412
+ [7] Michael Defferrard, Xavier Bresson, and Pierre Vandergheynst. 2016. Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering. In NIPS. 3837-3845.
413
+ [8] Jiali Duan, Yen-Liang Lin, Son Dinh Tran, Larry S. Davis, and C.-C. Jay Kuo. 2020. SLADE: A Self-Training Framework for Distance Metric Learning. In CVPR. 9644-9653.
414
+ [9] Yarin Gal and Zoubin Ghahramani. 2016. Dropout as a bayesian approximation: Representing model uncertainty in deep learning. In ICML. 1050-1059.
415
+ [10] Jacob Goldberger and Ehud Ben-Reuven. 2017. Training deep neural-networks using a noise adaptation layer. In ICLR.
416
+ [11] Alex Graves. 2011. Practical variational inference for neural networks. In NIPS. 2348-2356.
417
+ [12] William L Hamilton, Rex Ying, and Jure Leskovec. 2017. Inductive representation learning on large graphs. In NIPS. 1025-1035.
418
+
419
+ [13] Arman Hasanzadeh, Ehsan Hajiramezanali, Shahin Boluki, Mingyuan Zhou, Nick Duffield, Krishna Narayanan, and Xiaoning Qian. 2020. Bayesian graph neural networks with adaptive connection sampling. In ICML. 4094-4104.
420
+ [14] Junxian He, Jiatao Gu, Jiajun Shen, and Marc'Aurelio Ranzato. 2020. Revisiting Self-Training for Neural Sequence Generation. In ICLR.
421
+ [15] Jose Hernandez-Lobato, Yingzhen Li, Mark Rowland, Thang Bui, Daniel Hernandez-Lobato, and Richard Turner. 2016. Black-box alpha divergence minimization. In ICML. 1511-1520.
422
+ [16] H. J. Scudder III. 1965. Probability of error of some adaptive pattern-recognition machines. IEEE Transactions on Information Theory 11, 3 (1965), 363-371.
423
+ [17] Thomas N. Kipf and Max Welling. 2017. Semi-Supervised Classification with Graph Convolutional Networks. In ICLR.
424
+ [18] Johannes Klicpera, Aleksandar Bojchevski, and Stephan Gunnemann. 2019. Predict then Propagate: Graph Neural Networks meet Personalized PageRank. In ICLR.
425
+ [19] Dong-Hyun Lee et al. 2013. Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks. In ICML Workshop.
426
+ [20] Qimai Li, Zhichao Han, and Xiao-Ming Wu. 2018. Deeper Insights Into Graph Convolutional Networks for Semi-Supervised Learning. In AAAI. 3538-3545.
427
+ [21] Zaiqiao Meng, Shangsong Liang, Hongyan Bao, and Xiangliang Zhang. 2019. Co-embedding attributed networks. In WSDM. 393-401.
428
+ [22] Subhabrata Mukherjee and Ahmed Awadallah. 2020. Uncertainty-aware self-training for few-shot text classification. In NIPS.
429
+ [23] Giorgio Patrini, Alessandro Rozza, Aditya Krishna Menon, Richard Nock, and Lizhen Qu. 2017. Making deep neural networks robust to label noise: A loss correction approach. In CVPR. 1944-1952.
430
+ [24] Daniel Carlos Guimarães Pedronette and Longin Jan Latecki. 2021. Rank-based self-training for graph convolutional networks. Information Processing & Management 58, 2 (2021), 102443.
431
+ [25] Yu Rong, Wenbing Huang, Tingyang Xu, and Junzhou Huang. 2019. Dropedge: Towards deep graph convolutional networks on node classification. arXiv preprint arXiv:1907.10903 (2019).
432
+ [26] Prithviraj Sen, Galileo Namata, Mustafa Bilgic, Lise Getoor, Brian Galligher, and Tina Eliassi-Rad. 2008. Collective classification in network data. AI magazine 29, 3 (2008), 93-93.
433
+ [27] Sainbayar Sukhbaatar, Joan Bruna, Manohar Paluri, Lubomir Bourdev, and Rob Fergus. 2014. Training convolutional networks with noisy labels. arXiv preprint arXiv:1406.2080 (2014).
434
+ [28] Ke Sun, Zhouchen Lin, and Zhanxing Zhu. 2020. Multi-Stage Self-Supervised Learning for Graph Convolutional Networks on Graphs with Few Labeled Nodes. In AAAI. 5892-5899.
435
+ [29] Laurens Van der Maaten and Geoffrey Hinton. 2008. Visualizing data using t-SNE. Journal of machine learning research 9, 11 (2008).
436
+ [30] Petar Veličković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. 2017. Graph attention networks. In ICLR.
437
+ [31] Xiao Wang, Hongrui Liu, Chuan Shi, and Cheng Yang. 2021. Be Confident! Towards Trustworthy Graph Neural Networks via Confidence Calibration. Advances in Neural Information Processing Systems 34 (2021).
438
+ [32] Felix Wu, Amauri Souza, Tianyi Zhang, Christopher Fifty, Tao Yu, and Kilian Weinberger. 2019. Simplifying graph convolutional networks. In ICML. 6861-6871.
439
+ [33] Zonghan Wu, Shirui Pan, Fengwen Chen, Guodong Long, Chengqi Zhang, and S Yu Philip. 2020. A comprehensive survey on graph neural networks. IEEE transactions on neural networks and learning systems 32, 1 (2020), 4-24.
440
+ [34] Zonghan Wu, Shirui Pan, Fengwen Chen, Guodong Long, Chengqi Zhang, and S Yu Philip. 2020. A comprehensive survey on graph neural networks. IEEE transactions on neural networks and learning systems 32, 1 (2020), 4-24.
441
+ [35] Keyulu Xu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka. 2019. How Powerful are Graph Neural Networks? In ICLR.
442
+ [36] Ziwei Zhang, Peng Cui, and Wenwu Zhu. 2020. Deep learning on graphs: A survey. IEEE Transactions on Knowledge and Data Engineering (2020).
443
+ [37] Jie Zhou, Ganqu Cui, Shengding Hu, Zhengyan Zhang, Cheng Yang, Zhiyuan Liu, Lifeng Wang, Changcheng Li, and Maosong Sun. 2020. Graph neural networks: A review of methods and applications. AI Open 1 (2020), 57-81.
444
+ [38] Ziang Zhou, Jieming Shi, Shengzhong Zhang, Zengfeng Huang, and Qing Li. 2019. Effective Semi-Supervised Node Classification on Few-Labeled Graph Data. arXiv preprint arXiv:1910.02684 (2019).
445
+
446
+ # A SUPPLEMENT
447
+
448
+ In the supplement, we first provide detailed proofs of the important theorems in our paper, i.e., Theorem 4.1, Proposition 4.2 and Theorem 4.3. Next, more experimental details are presented for reproducibility.
449
+
450
+ # A.1 Proof
451
+
452
+ In this section, we successively present the detailed proofs of Theorem 4.1, Proposition 4.2 and Theorem 4.3.
453
+
454
+ # A.1.1 Proof of Theorem 4.1.
455
+
456
+ Proof. With our assumption that $\bar{y}_u = y_u$ for each pseudo-labeled node $v_{u}\in \mathcal{S}_{U}$, we first rewrite $\mathcal{L}_{pop}$ in Eq. 3 as:
457
+
458
+ $$
459
+ \begin{array}{l} \mathcal{L}_{pop} = \frac{\left| \mathcal{S}_{U} \right|}{\left| \mathcal{V}_{L} \cup \mathcal{S}_{U} \right|} \mathbb{E}_{(v_{u}, y_{u}) \sim P_{pop}(\mathcal{V}, \mathcal{Y})}\, l(\bar{y}_{u}, \mathrm{p}_{u}) \tag{17} \\ \quad + \frac{|\mathcal{V}_{L}|}{|\mathcal{V}_{L} \cup \mathcal{S}_{U}|} \mathbb{E}_{(v_{i}, y_{i}) \sim P_{pop}(\mathcal{V}, \mathcal{Y})}\, l(y_{i}, \mathrm{p}_{i}). \end{array}
460
+ $$
461
+
462
+ Note that
463
+
464
+ $$
465
+ \mathbb {E} _ {\left(v _ {u}, y _ {u}\right) \sim P _ {p o p} (\mathcal {V}, \mathcal {Y})} l \left(\bar {y} _ {u}, \mathrm {p} _ {u}\right) = \mathbb {E} _ {\left(v _ {u}, y _ {u}\right) \sim P _ {s t} (\mathcal {V}, \mathcal {Y})} \frac {P _ {p o p} \left(v _ {u} , y _ {u}\right)}{P _ {s t} \left(v _ {u} , y _ {u}\right)} l \left(\bar {y} _ {u}, \mathrm {p} _ {u}\right), \tag {18}
466
+ $$
467
+
468
+ then we can rewrite Eq. 17 as
469
+
470
+ $$
471
+ \begin{array}{l} \mathcal{L}_{pop} = \frac{|\mathcal{S}_{U}|}{|\mathcal{V}_{L} \cup \mathcal{S}_{U}|} \mathbb{E}_{(v_{u}, y_{u}) \sim P_{st}(\mathcal{V}, \mathcal{Y})} \frac{P_{pop}(v_{u}, y_{u})}{P_{st}(v_{u}, y_{u})}\, l(\bar{y}_{u}, \mathrm{p}_{u}) \\ \quad + \frac{\left| \mathcal{V}_{L} \right|}{\left| \mathcal{V}_{L} \cup \mathcal{S}_{U} \right|} \mathbb{E}_{(v_{i}, y_{i}) \sim P_{pop}(\mathcal{V}, \mathcal{Y})}\, l(y_{i}, \mathrm{p}_{i}) \tag{19} \\ = \frac{\left| \mathcal{S}_{U} \right|}{\left| \mathcal{V}_{L} \cup \mathcal{S}_{U} \right|} \mathbb{E}_{\left(v_{u}, y_{u}\right) \sim P_{st}\left(\mathcal{V}, \mathcal{Y}\right)} \gamma_{u}\, l\left(\bar{y}_{u}, \mathrm{p}_{u}\right) \\ \quad + \frac{|\mathcal{V}_{L}|}{|\mathcal{V}_{L} \cup \mathcal{S}_{U}|} \mathbb{E}_{(v_{i}, y_{i}) \sim P_{pop}(\mathcal{V}, \mathcal{Y})}\, l(y_{i}, \mathrm{p}_{i}), \end{array}
472
+ $$
473
+
474
+ where $\gamma_{u} = P_{pop}(v_u, y_u) / P_{st}(v_u, y_u)$ can be regarded as a weight on the loss function for each pseudo-labeled node $v_{u}$.
475
+
476
+ Finally, recalling the loss function under the distribution shift case in Eq. 4, i.e.,
477
+
478
+ $$
479
+ \begin{array}{l} \mathcal {L} _ {s t} = \frac {\left| \mathcal {V} _ {L} \right|}{\left| \mathcal {V} _ {L} \cup \mathcal {S} _ {U} \right|} \mathbb {E} _ {\left(v _ {i}, y _ {i}\right) \sim P _ {p o p} (\nu , \mathcal {Y})} l \left(y _ {i}, \mathrm {p} _ {i}\right) \tag {20} \\ + \frac {| \mathcal {S} _ {U} |}{| \mathcal {V} _ {L} \cup \mathcal {S} _ {U} |} \mathbb {E} _ {(v _ {u}, y _ {u}) \sim P _ {s t} (\mathcal {V}, \mathcal {Y})} l (\bar {y} _ {u}, \mathrm {p} _ {u}), \\ \end{array}
480
+ $$
481
+
482
+ we can find that it equals $\mathcal{L}_{pop}$ in Eq. 3 except for the weight coefficient on each pseudo-labeled term. In other words, we can recover the population distribution as long as we weight each pseudo-labeled node with the proper coefficient $\gamma_u$ in $\mathcal{L}_{st}$.
483
+
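+ As a minimal sketch of such a weighted objective (our illustration of the result above, not the authors' code; the function and variable names are ours, with `gamma` the per-node weight $\gamma_u$):
+
+ ```python
+ import torch.nn.functional as F
+
+ def weighted_self_training_loss(logits, y_lab, idx_lab, y_pseudo, idx_pseudo, gamma):
+     # labeled term plus gamma-weighted pseudo-labeled term,
+     # normalized by |V_L u S_U| as in Eqs. 3/4
+     n = len(idx_lab) + len(idx_pseudo)
+     loss_l = F.cross_entropy(logits[idx_lab], y_lab, reduction="sum")
+     loss_u = (gamma * F.cross_entropy(logits[idx_pseudo], y_pseudo,
+                                       reduction="none")).sum()
+     return (loss_l + loss_u) / n
+ ```
+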
484
+ # A.1.2 Proof of Proposition 4.2.
485
+
486
+ Proof. Without loss of generality, we respectively prove the equality of $\theta^{*}$ and $\bar{\theta}$ under MSE loss and CE loss.
487
+
488
+ MSE loss. Under the MSE loss, with our non-zero assumption on $\mathrm{T}$, the following equation holds:
489
+
490
+ $$
491
+ \begin{array}{l} \theta^ {*} = \arg \min _ {\theta^ {*} \in \Theta} \sum_ {u} | | f _ {\theta^ {*}} (\mathrm {x} _ {u}, \mathrm {A}) - \mathrm {y} _ {u} | | ^ {2} \\ = \arg \min _ {\theta^ {*} \in \Theta} \sum_ {u} \left\| \mathrm {T} f _ {\theta^ {*}} \left(\mathrm {x} _ {u}, \mathrm {A}\right) - \mathrm {T y} _ {u} \right\| ^ {2} \tag {21} \\ = \arg \min _ {\bar {\theta} \in \Theta} \sum_ {u} | | f _ {\bar {\theta}} (\mathrm {x} _ {u}, \mathrm {A}) - \bar {\mathrm {y}} _ {u} | | ^ {2} = \bar {\theta}, \\ \end{array}
492
+ $$
493
+
494
+ where $\mathrm{y}_{u}$ is the one-hot vector expanded from $y_{u}$. This concludes the proof for the MSE loss.
495
+
496
+ CE loss. Under the CE loss, we prove the equality of $\bar{\theta}$ and $\theta^{*}$ from the perspective of gradient descent. Specifically, if for each node $v_{u}$ the gradient of $f_{\bar{\theta}}(\mathrm{x}_u,\mathrm{A})$ w.r.t. $\bar{\theta}$ is equal to that of $f_{\theta^{*}}(\mathrm{x}_u,\mathrm{A})$ w.r.t. $\theta^{*}$, then optimizing a model $f_{\bar{\theta}}$ using gradient descent will lead to our desired model $f_{\theta^{*}}$, that is, $\bar{\theta} = \theta^{*}$.
497
+
498
+ Specifically, for each node $v_{u}$ , we first rewrite the CE loss as follows:
499
+
500
+ $$
501
+ l \left(\mathrm {y} _ {u}, \mathrm {p} _ {u}\right) = \mathrm {y} _ {u} ^ {\top} \log f _ {\theta} \left(\mathrm {x} _ {u}, \mathrm {A}\right). \tag {22}
502
+ $$
503
+
504
+ Then the difference $d$ of gradient between $\bar{\theta}$ and $\theta^{*}$ can be written as:
505
+
506
+ $$
507
+ d = \left\| \nabla_ {\theta} \bar {y} _ {u} ^ {\mathsf {T}} \log f _ {\bar {\theta}} \left(\mathrm {x} _ {u}, \mathrm {A}\right) - \nabla_ {\theta} \mathrm {y} _ {u} ^ {\mathsf {T}} \log f _ {\theta^ {*}} \left(\mathrm {x} _ {u}, \mathrm {A}\right) \right\| \tag {23}
508
+ $$
509
+
510
+ Considering our assumption that $f_{\overline{\theta}}(\mathrm{x}_u,\mathrm{A}) = \mathrm{T}f_{\theta^*}(\mathrm{x}_u,\mathrm{A}),$ Eq. 23 becomes:
511
+
512
+ $$
513
+ d = \left\| \nabla_ {\theta} \left(\mathrm {T y} _ {u}\right) ^ {\top} \log \left(\mathrm {T f} _ {\theta^ {*}} \left(\mathrm {x} _ {u}, \mathrm {A}\right)\right) - \nabla_ {\theta} \mathrm {y} _ {u} ^ {\top} \log f _ {\theta^ {*}} \left(\mathrm {x} _ {u}, \mathrm {A}\right) \right\| \tag {24}
514
+ $$
515
+
516
+ According to the chain rule, we have:
517
+
518
+ $$
519
+ d = \left\| \nabla_ {\theta} f _ {\theta^ {*}} \left(\mathrm {x} _ {u}, \mathrm {A}\right) \cdot \left(\mathrm {T} ^ {\top} \left(\mathrm {T y} _ {u} \oslash \mathrm {T f} _ {\theta^ {*}} \left(\mathrm {x} _ {u}, \mathrm {A}\right)\right) - \mathrm {y} _ {u} \oslash f _ {\theta^ {*}} \left(\mathrm {x} _ {u}, \mathrm {A}\right)\right) \right\|, \tag {25}
520
+ $$
521
+
522
+ where $\oslash$ represents the element-wise division operation.
523
+
524
+ Obviously, if $\mathrm{T}$ is a permutation matrix, the difference $d$ of the gradients is zero, since a permutation matrix satisfies $\mathrm{T}^{\top}\mathrm{T} = \mathrm{I}$ and $\mathrm{Ty}_{u} \oslash \mathrm{T}f_{\theta^{*}}(\mathrm{x}_{u}, \mathrm{A}) = \mathrm{T}\left(\mathrm{y}_{u} \oslash f_{\theta^{*}}(\mathrm{x}_{u}, \mathrm{A})\right)$, so the term in parentheses in Eq. 25 vanishes. This concludes the proof for the CE loss.
525
+
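+ This identity is easy to check numerically; below is a small self-contained sanity check of our own (the class count of 5 is arbitrary):
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ c = 5
+ T = np.eye(c)[rng.permutation(c)]   # random permutation matrix
+ y = np.eye(c)[2]                    # one-hot label
+ f = rng.dirichlet(np.ones(c))       # model output, a probability vector
+
+ lhs = T.T @ ((T @ y) / (T @ f))     # T^T (Ty / Tf), as in Eq. 25
+ rhs = y / f
+ assert np.allclose(lhs, rhs)        # the term in parentheses vanishes
+ ```
+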
526
+ A.1.3 Proof of Theorem 4.3. To prove Theorem 4.3, we need to borrow a corollary from [38], which illustrates the impact of incorrect pseudo labels on self-training without distribution shift.
527
+
528
+ COROLLARY A.1. Assuming that the augmented dataset follows the population distribution $P_{pop}$ and that $||\nabla_{\theta}l|| \leq \Psi$ for the gradient of the loss at any node, the following bound between $\nabla_{\theta}\mathcal{L}_{pop}$ and $\nabla_{\theta}\mathcal{L}_{st}$ holds:
529
+
530
+ $$
531
+ \left\| \nabla_{\theta} \mathcal{L}_{pop} - \nabla_{\theta} \mathcal{L}_{st} \right\| \leqslant \frac{|\mathcal{S}_{U}|}{|\mathcal{V}_{L} \cup \mathcal{S}_{U}|} 2 \Psi \left\| P_{(v_{u}, y_{u}) \sim P_{pop}(\mathcal{V}, \mathcal{Y})}(\bar{y}_{u} \neq y_{u}) \right\|. \tag{26}
532
+ $$
533
+
534
+ Now, we prove Theorem 4.3.
535
+
536
+ Proof. We first calculate the difference between $\nabla_{\theta}\mathcal{L}_{pop}$ and $\nabla_{\theta}\mathcal{L}_{st}$ as follows:
537
+
538
+ $$
539
+ \begin{array}{l} | | \nabla_ {\theta} \mathcal {L} _ {p o p} - \nabla_ {\theta} \mathcal {L} _ {s t} | | = \frac {| \mathcal {S} _ {U} |}{| \mathcal {V} _ {L} \cup \mathcal {S} _ {U} |} | | \mathbb {E} _ {(v _ {u}, y _ {u}) \sim P _ {p o p} (\mathcal {V}, \mathcal {Y})} \nabla_ {\theta} l (y _ {u}, \mathrm {p} _ {u}) \\ - \mathbb {E} _ {\left(v _ {u}, y _ {u}\right) \sim P _ {s t} (\mathcal {V}, \mathcal {Y})} \nabla_ {\theta} l \left(\bar {y} _ {u}, p _ {u}\right) | |. \tag {27} \\ \end{array}
540
+ $$
541
+
542
+ Adding and subtracting the same term $\mathbb{E}_{(v_u,y_u)\sim P_{pop}(\mathcal{V},\mathcal{Y})}\nabla_\theta l(\bar{y}_u,\mathrm{p}_u)$ and abbreviating $\frac{|\mathcal{S}_U|}{|\mathcal{V}_L\cup \mathcal{S}_U|}$ as $\eta$, Eq. 27 can be written as:
543
+
544
+ $$
545
+ \begin{array}{l} \left| \left| \nabla_ {\theta} \mathcal {L} _ {p o p} - \nabla_ {\theta} \mathcal {L} _ {s t} \right| \right| = \\ \eta | | \mathbb {E} _ {(v _ {u}, y _ {u}) \sim P _ {p o p} (\mathcal {V}, \mathcal {Y})} \nabla_ {\theta} l (y _ {u}, p _ {u}) - \mathbb {E} _ {(v _ {u}, y _ {u}) \sim P _ {p o p} (\mathcal {V}, \mathcal {Y})} \nabla_ {\theta} l (\bar {y} _ {u}, p _ {u}) \\ + \mathbb {E} _ {\left(v _ {u}, y _ {u}\right) \sim P _ {p o p} (\mathcal {V}, \mathcal {Y})} \nabla_ {\theta} l \left(\bar {y} _ {u}, \mathrm {p} _ {u}\right) - \mathbb {E} _ {\left(v _ {u}, y _ {u}\right) \sim P _ {s t} (\mathcal {V}, \mathcal {Y})} \nabla_ {\theta} l \left(\bar {y} _ {u}, \mathrm {p} _ {u}\right) | | \tag {28} \\ \end{array}
546
+ $$
547
+
548
+ According to the triangle inequality of the norm, the following inequality is satisfied:
549
+
550
+ $$
551
+ \begin{array}{l} \left\| \nabla_{\theta} \mathcal{L}_{pop} - \nabla_{\theta} \mathcal{L}_{st} \right\| \leq \\ \eta \Big( \left\| \mathbb{E}_{\left(v_{u}, y_{u}\right) \sim P_{pop}(\mathcal{V}, \mathcal{Y})} \nabla_{\theta} l\left(y_{u}, \mathrm{p}_{u}\right) - \mathbb{E}_{\left(v_{u}, y_{u}\right) \sim P_{pop}(\mathcal{V}, \mathcal{Y})} \nabla_{\theta} l\left(\bar{y}_{u}, \mathrm{p}_{u}\right) \right\| \\ \quad + \left\| \mathbb{E}_{\left(v_{u}, y_{u}\right) \sim P_{pop}(\mathcal{V}, \mathcal{Y})} \nabla_{\theta} l\left(\bar{y}_{u}, \mathrm{p}_{u}\right) - \mathbb{E}_{\left(v_{u}, y_{u}\right) \sim P_{st}(\mathcal{V}, \mathcal{Y})} \nabla_{\theta} l\left(\bar{y}_{u}, \mathrm{p}_{u}\right) \right\| \Big). \tag{29} \end{array}
552
+ $$
553
+
554
+ Table 2: The statistics of datasets
555
+
556
+ <table><tr><td>Dataset</td><td>Nodes</td><td>Edges</td><td>Classes</td><td>Features</td><td>Validation</td><td>Test</td></tr><tr><td>Cora</td><td>2708</td><td>5429</td><td>7</td><td>1433</td><td>500</td><td>1000</td></tr><tr><td>Citeseer</td><td>3327</td><td>4732</td><td>6</td><td>3703</td><td>500</td><td>1000</td></tr><tr><td>Pubmed</td><td>19717</td><td>44338</td><td>3</td><td>500</td><td>500</td><td>1000</td></tr><tr><td>CoraFull</td><td>19793</td><td>65311</td><td>70</td><td>8710</td><td>500</td><td>1000</td></tr><tr><td>Flickr</td><td>7575</td><td>239738</td><td>9</td><td>12047</td><td>500</td><td>1000</td></tr></table>
557
+
558
+ Recalling Corollary A.1, we know that the first term on the right hand side satisfies:
559
+
560
+ $$
561
+ \begin{array}{l} \left| \left| \mathbb {E} _ {\left(v _ {u}, y _ {u}\right) \sim P _ {p o p} (\mathcal {V}, \mathcal {Y})} \nabla_ {\theta} l \left(y _ {u}, \mathrm {p} _ {u}\right) - \mathbb {E} _ {\left(v _ {u}, y _ {u}\right) \sim P _ {p o p} (\mathcal {V}, \mathcal {Y})} \nabla_ {\theta} l \left(\bar {y} _ {u}, \mathrm {p} _ {u}\right) \right| \right| \\ \leq 2 \Psi | | P _ {(v _ {u}, y _ {u}) \sim P _ {p o p} (\mathcal {V}, \mathcal {Y})} (\bar {y} _ {u} \neq y _ {u}) | |. \tag {30} \\ \end{array}
562
+ $$
563
+
564
+ And for the second term, we have:
565
+
566
+ $$
567
+ \begin{array}{l} \left\| \mathbb{E}_{(v_{u}, y_{u}) \sim P_{pop}(\mathcal{V}, \mathcal{Y})} \nabla_{\theta} l(\bar{y}_{u}, \mathrm{p}_{u}) - \mathbb{E}_{(v_{u}, y_{u}) \sim P_{st}(\mathcal{V}, \mathcal{Y})} \nabla_{\theta} l(\bar{y}_{u}, \mathrm{p}_{u}) \right\| \\ = \left\| \int_{-\infty}^{+\infty} \int_{-\infty}^{+\infty} \nabla_{\theta} l(\bar{y}_{u}, \mathrm{p}_{u})\, d\big(P_{pop}(\mathcal{V}, \mathcal{Y}) - P_{st}(\mathcal{V}, \mathcal{Y})\big) \right\| \\ \leq \Psi \cdot \left\| P_{pop}(\mathcal{V}, \mathcal{Y}) - P_{st}(\mathcal{V}, \mathcal{Y}) \right\|, \tag{31} \end{array}
568
+ $$
569
+
570
+ where the inequality follows from our assumption that $||\nabla_{\theta}l||\leq \Psi$.
571
+
572
+ Combining Eq. 30 with Eq. 31, we have:
573
+
574
+ $$
575
+ \begin{array}{l} \left\| \nabla_{\theta} \mathcal{L}_{pop} - \nabla_{\theta} \mathcal{L}_{st} \right\| \leqslant \frac{\left| \mathcal{S}_{U} \right|}{\left| \mathcal{V}_{L} \cup \mathcal{S}_{U} \right|} \Psi \Big( 2 \left\| P_{\left(v_{u}, y_{u}\right) \sim P_{pop}(\mathcal{V}, \mathcal{Y})}\left(\bar{y}_{u} \neq y_{u}\right) \right\| \\ \quad + \left\| P_{st}(\mathcal{V}, \mathcal{Y}) - P_{pop}(\mathcal{V}, \mathcal{Y}) \right\| \Big). \tag{32} \end{array}
576
+ $$
577
+
578
+ The proof is concluded.
579
+
580
+ # A.2 Time Complexity Analysis
581
+
582
+ We first analyze the time complexity of a general self-training framework. Assuming training one epoch takes $O(M)$ time, given $E$ epochs, its time complexity in each stage is $O(EM)$. DR-GST adds two components, information gain estimation and loss correction, which respectively take $O(TM)$ and $O(Ec^2)$ time in each stage, where $T$ is the number of sampling rounds for variational inference and $c$ is the number of classes. Moreover, considering that we train a student model twice in each stage, the total time complexity is $O((2E + T)M + Ec^2)$. In practice, $T$ and $O(Ec^2)$ are far smaller than $E$ and $O(EM)$, respectively. Consequently, the time complexity of DR-GST is approximately twice that of the general self-training framework.
583
+
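+ As a concrete (hypothetical) instance: with $T = 100$ sampling rounds (the value used in Appendix A.3.2), $c = 7$ classes as on Cora, and an assumed $E = 1000$ epochs, the per-stage cost is $O((2 \times 1000 + 100)M + 1000 \times 49) = O(2100M + 49000)$, i.e., roughly $2.1\times$ the $O(EM)$ cost of a plain self-training stage, consistent with the estimate above.
+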
584
+ # A.3 More Experimental Details
585
+
586
+ A.3.1 Details of datasets. We adopt five widely used benchmark datasets from citation networks [3, 26] (i.e., Cora, Citeseer, Pubmed and CoraFull) and a social network [21] (i.e., Flickr) for evaluation. For the citation networks, nodes represent papers, edges are the citation relationships between papers, node features are bag-of-words vectors of the papers, and labels represent the fields of the papers. For the social network, nodes in Flickr represent users of the Flickr website, edges are relationships induced by their photo-sharing records, and labels represent users' interest groups. For all the datasets, we choose 500 nodes for validation and 1000 nodes for test. The details of these datasets are summarized in Table 2. Our data
587
+
588
+ are public and do not contain personally identifiable information and offensive content. The address of our data is https://docs.dgl.ai/en/latest/api/python/dgl.data.html#node-prediction-datasets and the license is Apache License 2.0.
589
+
590
+ A.3.2 Implementation. We supplement the implementation details of DR-GST and all the baselines here.
591
+
592
+ For a fair comparison, we utilize a standard 2-layer GCN as the backbone for all graph self-training frameworks. We optimize models via Adam with a learning rate of 0.01 and early stopping with a window size of 200. In particular, we set L2 regularization with $\lambda_r = 5e - 4$ for Cora, Citeseer, Pubmed and CoraFull, and $\lambda_r = 5e - 5$ for Flickr. We use ReLU as the activation function and apply a dropout rate of 0.5 to prevent over-fitting. For MC-dropout and MC-dropedge, we set the number of samples to $T = 100$, as sketched below. Moreover, we apply grid search for the other important hyper-parameters. Specifically, the drop rate of MC-dropout and MC-dropedge is chosen from $\{0.1, 0.2, \dots, 0.5\}$, the balance coefficient $\beta$ for information gain in Eq. 13 is searched in $\{4/3, 1, 2/3, 1/2, 1/3, 1/4\}$, and the threshold $\tau$ is tuned amongst $\{0.4, 0.45, \dots, 0.75\}$ for Cora and Citeseer, $\{0.6, 0.65, \dots, 0.9\}$ for Pubmed and CoraFull, and $\{0.75, 0.78, \dots, 0.96\}$ for Flickr.
593
+
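+ As a minimal PyTorch sketch of MC-dropout sampling and a BALD-style information-gain estimate (our illustration, which may differ in detail from the paper's exact definition; `model(x, adj)` returning logits is an assumed interface):
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ @torch.no_grad()
+ def mc_dropout_predict(model, x, adj, T=100):
+     model.train()  # keep dropout stochastic at inference time
+     probs = torch.stack([F.softmax(model(x, adj), dim=-1) for _ in range(T)])
+     return probs  # shape (T, num_nodes, num_classes)
+
+ def information_gain(probs, eps=1e-12):
+     # H(mean prediction) - mean H(prediction): high for nodes whose
+     # predictions disagree across the T stochastic passes
+     mean_p = probs.mean(dim=0)
+     h_mean = -(mean_p * mean_p.clamp_min(eps).log()).sum(-1)
+     mean_h = -(probs * probs.clamp_min(eps).log()).sum(-1).mean(0)
+     return h_mean - mean_h
+ ```
+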
594
+ We adopt the implementation of GCN, GAT and APPNP from DGL $^2$ , and the implementations of STs $^3$ and ABN $^4$ are publicly provided by their authors. Considering that the implementation of M3S is not available, we re-implement it referring to the original paper [28]. For all baselines, we perform grid search for important hyper-parameters (i.e., $\tau$ ) to obtain optimal results.
595
+
596
+ A.3.3 Experimental Environment. In this section, we summarize the hardware and software environment used in our experiments.
597
+
598
+ We utilize a Linux machine powered by an Intel(R) Xeon(R) E5-2682 v4 @ 2.50GHz CPU, 4 Tesla P100-PCIE-16GB GPUs, and 4 GeForce RTX 3090 GPU cards.
599
+
600
+ The operating system is Linux version 3.10.0-693.el7.x86_64. We implement our code with Python 3.8.8 and the following Python packages: PyTorch 1.8.1, DGL 0.6.0 (CUDA 10.1), and NetworkX 2.5.
2201.11xxx/2201.11349/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae82221be2c8e4f9af6a1e161478e5f14d8d613ae6c1cd52e610698ff28b1287
3
+ size 720821
2201.11xxx/2201.11349/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2201.11xxx/2201.11368/7f8ba145-d060-44e2-99f9-7e27e253fecb_content_list.json ADDED
@@ -0,0 +1,829 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Prediction and Detection of FDIA and DDoS Attacks in 5G Enabled IoT",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 133,
8
+ 63,
9
+ 861,
10
+ 126
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Hajar Moudoud $*\\ddagger$ , Lyes Khoukhi $\\dagger$ , Soumaya Cherkoui $*$ ,",
17
+ "bbox": [
18
+ 274,
19
+ 150,
20
+ 720,
21
+ 167
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "* Department of Electrical and Computer Engineering, Université de Sherbrooke, Canada",
28
+ "bbox": [
29
+ 200,
30
+ 167,
31
+ 803,
32
+ 183
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "† GREYC CNRS, ENSICAEN, Normandie University, France",
39
+ "bbox": [
40
+ 287,
41
+ 183,
42
+ 709,
43
+ 198
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "$\\ddagger$ University of Technology of Troyes, France",
50
+ "bbox": [
51
+ 341,
52
+ 199,
53
+ 650,
54
+ 214
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "{hajar.moudoud, soumaya.cherkaoui}@usherbrooke.ca, lyes.khoukhi@ensicaen.fr",
61
+ "bbox": [
62
+ 227,
63
+ 215,
64
+ 771,
65
+ 229
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "Abstract—Security in the fifth generation (5G) networks has become one of the prime concerns in the telecommunication industry. 5G security challenges come from the fact that 5G networks involve different stakeholders using different security requirements and measures. Deficiencies in security management between these stakeholders can lead to security attacks. Therefore, security solutions should be conceived for the safe deployment of different 5G verticals (e.g., industry 4.0, Internet of Things (IoT), etc.). The interdependencies among 5G and fully connected systems, such as IoT, entail some standard security requirements, namely integrity, availability, and confidentiality. In this article, we propose a hierarchical architecture for securing 5G enabled IoT networks, and a security model for the prediction and detection of False Data Injection Attacks (FDIA) and Distributed Denial of Service attacks (DDoS). The proposed security model is based on a Markov stochastic process, which is used to observe the behavior of each network device, and employ a range-based behavior sifting policy. Simulation results demonstrate the effectiveness of the proposed architecture and model in detecting and predicting FDIA and DDoS attacks in the context of 5G enabled IoT.",
72
+ "bbox": [
73
+ 73,
74
+ 258,
75
+ 488,
76
+ 523
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "I. INTRODUCTION",
83
+ "text_level": 1,
84
+ "bbox": [
85
+ 215,
86
+ 537,
87
+ 346,
88
+ 551
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "The rise of 5G networks with the Internet of Things (IoT) systems is set to improve reliable communications and stable connections. 5G new radio access technology has low latency, high availability, and exceptional speed; all needed for several IoT systems [1, 2]. However, beyond providing network performance, 5G enabled IoT systems should also preserve security and improve the reliability of services. A recent report commissioned by the European Union [3] suggests security risks are “likely to appear or become more prominent in 5G networks” because of the extended use of software to run 5G networks. A successfully launched attack in 5G networks may lead to undesired operations and important consequences. This has been understood by hackers who are using new tactics to monetize their attacks by controlling sensitive data, asking for ransom, or making the network unavailable. Yet, not only external hackers could threaten security in 5G networks; insider actors can be the biggest scourges for the system. Network insider actors designate internal workers within the network or network actors. Network insiders with access to sensitive data or resources could cause information breaches or inject false data, for example.",
95
+ "bbox": [
96
+ 73,
97
+ 558,
98
+ 488,
99
+ 876
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "text",
105
+ "text": "According to 5G PPP, 5G enabled IoT applications are expected to suffer from several security issues due to the",
106
+ "bbox": [
107
+ 73,
108
+ 876,
109
+ 488,
110
+ 907
111
+ ],
112
+ "page_idx": 0
113
+ },
114
+ {
115
+ "type": "text",
116
+ "text": "complexity and expansion of the attack surface [4, 5]. The increasing scale of devices connectivity and interconnectivity along with network slicing will enable a new range of IoT applications. Yet, one of the weak spots in security will be the devices themselves; they can be remotely controlled to form what is known as a botnet to perform serious security attacks. Most of the existing IoT devices were not developed with security as a priority [6]. The volume of data generated by IoT devices is also expected to grow exponentially; traditional intrusion detection methods could be less reliable and further challenging. Therefore, new security attacks detection methods should be further explored in 5G networks. Furthermore, the prediction of network attacks in advance may be a better alternative. Because, once we recognize the possibility of attacks, and verify the insufficient protection, faster mitigation and recovery processes can be deployed.",
117
+ "bbox": [
118
+ 504,
119
+ 257,
120
+ 921,
121
+ 500
122
+ ],
123
+ "page_idx": 0
124
+ },
125
+ {
126
+ "type": "text",
127
+ "text": "To protect 5G networks certain security attacks must be addressed; most notably, DDoS attacks. A DDoS attack in the current cellular network (e.g., 4G) can only compromise one service. However, in 5G networks, if a malicious hacker takes control of a slice and launches a DDoS attack, this could compromise services belonging to the same virtual network. In addition, in 5G networks, a DDoS attack could intensify; other installments can also be compromised by this attack if a tunneling protocol is shared between various 5G slices.",
128
+ "bbox": [
129
+ 504,
130
+ 500,
131
+ 921,
132
+ 635
133
+ ],
134
+ "page_idx": 0
135
+ },
136
+ {
137
+ "type": "text",
138
+ "text": "IoT systems are enduring an exponential increase of attack surface, because of the large number of possibly compromised IoT devices [7, 8]. Knowing that data transferred by these devices is often critical in decision-making, yet, if it is falsified and quickly transferred using 5G, it could lead to massive consequences for users. Typical 5G enabled IoT systems need some standard security requirements, e.g., 1) resilience to attacks, 2) access control, and 3) data protection. First, resilience to attacks means that the IoT system should not possess a single point of failure, but should adjust itself to network and device failures. Second, data provided by IoT devices requires implementing access control mechanisms to preserve security. For example, before authorizing access to data, user authentication and authorization must be verified. Third, data protection implies the need for resilience to security attacks (e.g., FDIA) and guarantee data authenticity, confidentiality, integrity, and availability. In this paper, motivated by the aforementioned security issues, we propose 1) a hierarchical",
139
+ "bbox": [
140
+ 504,
141
+ 635,
142
+ 921,
143
+ 907
144
+ ],
145
+ "page_idx": 0
146
+ },
147
+ {
148
+ "type": "aside_text",
149
+ "text": "arXiv:2201.11368v1 [cs.CR] 27 Jan 2022",
150
+ "bbox": [
151
+ 22,
152
+ 268,
153
+ 57,
154
+ 705
155
+ ],
156
+ "page_idx": 0
157
+ },
158
+ {
159
+ "type": "text",
160
+ "text": "architecture for securing 5G enabled IoT networks that uses three layers: the access layer, the multi-access edge computing (MEC) layer, and the cloud layer; 2) a prediction and detection model for FDIA and DDoS attacks based on a Markov stochastic process. The model provides the ability to predict security attacks in situations where the outcome is random, depending only on the present state. Finally, to accurately illustrate the effectiveness of the proposed model, we test it on a healthcare IoT use case.",
161
+ "bbox": [
162
+ 76,
163
+ 61,
164
+ 486,
165
+ 196
166
+ ],
167
+ "page_idx": 1
168
+ },
169
+ {
170
+ "type": "text",
171
+ "text": "II. SECURITY IN 5G ENABLED IOT NETWORKS: RELATED WORK",
172
+ "text_level": 1,
173
+ "bbox": [
174
+ 89,
175
+ 208,
176
+ 475,
177
+ 234
178
+ ],
179
+ "page_idx": 1
180
+ },
181
+ {
182
+ "type": "text",
183
+ "text": "5G networks will enable IoT devices to communicate and share data faster than ever. However, this development is likely to increase the systems' vulnerability to security threats, including those from malicious nodes [9]. To overcome some of the security issues, researchers have introduced new solutions that are suitable for 5G networks. For example, to prevent unauthorized action within the network, Xue et al. [10] proposed an efficient access control framework that addresses the problem of a single-point performance bottleneck. The framework uses multiple attribute authorities to partition a load of user legitimacy verification, where each authority manages individually all attributes. Yet, this multi-authority access control framework relies on a central authority responsible for generating secret keys for all users. To ensure key collision resistance between the parallel working authorities, the authors proposed to use timestamp numbers, defined separately in a time interval. Even though this framework is robust for many security issues, it does not ensure resilience to internal system security attacks like FDIA. To address the issue of secure access to slices in 5G networks, Sathi et al. [11] proposed a 5G protocol for securing network slices. The proposed protocol is based on proxy re-encryption, thus ensuring slice isolation and service anonymity. To mitigate attacks, such as DDoS, which has a huge impact on the network, Sattar et al. [12] proposed a proactive isolation method to mitigate these attacks in 5G core network slices. This isolation method is suitable for interslices and intra-slices; however, the scalability requirements underlying IoT applications have not been evaluated in this work. To provide service security in 5G vehicular networks Eiza et al. [13] presented a new system model for vehicular networks, addressing privacy and security issues for real-time video reporting. The proposed cryptographic security model aims to preserve privacy for device-to-device communications while limiting overheads. The model is, however, specific to a vehicular context rather than being a general model for IoT applications in a 5G context. For various IoT systems, data is collected from heterogeneous devices that support different applications and technologies. This makes the security even more difficult. Recently, several intrusion detection systems have been proposed to secure 5G enabled IoT applications [14, 15]. Ni et al. [16] proposed an efficient secure network-sliced and service-oriented authentication framework for preserving privacy in network slicing and fog computing in 5G networks. The framework enables both 5G stakeholders and",
184
+ "bbox": [
185
+ 76,
186
+ 241,
187
+ 486,
188
+ 906
189
+ ],
190
+ "page_idx": 1
191
+ },
192
+ {
193
+ "type": "text",
194
+ "text": "IoT systems to support anonymous service-oriented authentication. The authors proposed an access control mechanism based on a key agreement for IoT operators in 5G networks. However, the computational burden and delay brought by the framework may not be appropriate for some IoT applications. Furthermore, to detect intrusion in a network embedded with heterogeneous devices, Loukas et al. [17] proposed an efficient dynamic intrusion system for IoT based on RNN and LSTM to identify DDoS, injection attacks, and other malware. To secure the network and detect intrusions, Fan et al. [18] proposed a hidden Markov model. The issue of this work is that it is not suitable for 5G enabled IoT applications that need real-time responses (e.g., smart hospital). We can notice that some presented works are not well suited for 5G enabled IoT latency-sensitive applications. Furthermore, some approaches (e.g., [12] and [13]) cannot ensure the scalability needed by IoT systems. Moreover, in 5G enabled IoT systems, data can be manipulated by different operators and actors. Additionally, apart from security attack mitigation, it is essential to focus on attack prediction and detection. In this paper, we propose the adoption of a proactive approach for securing sensitive data by preventing upcoming threats and responding to these threats before they can cause any harm to the systems.",
195
+ "bbox": [
196
+ 509,
197
+ 61,
198
+ 919,
199
+ 407
200
+ ],
201
+ "page_idx": 1
202
+ },
203
+ {
204
+ "type": "text",
205
+ "text": "III. SECURE 5G ENABLED IOT ARCHITECTURE AGAINST FDIA AND DDOS ATTACKS",
206
+ "text_level": 1,
207
+ "bbox": [
208
+ 524,
209
+ 419,
210
+ 903,
211
+ 446
212
+ ],
213
+ "page_idx": 1
214
+ },
215
+ {
216
+ "type": "text",
217
+ "text": "In this section, we describe the proposed hierarchical architecture for securing 5G enabled IoT networks. Then, we present the security models implemented in the architecture for the prediction and detection of FDIA and DDoS attacks in 5G enabled IoT networks.",
218
+ "bbox": [
219
+ 509,
220
+ 455,
221
+ 919,
222
+ 527
223
+ ],
224
+ "page_idx": 1
225
+ },
226
+ {
227
+ "type": "text",
228
+ "text": "A. A Secure 5G Enabled IoT Architecture: An Overview",
229
+ "text_level": 1,
230
+ "bbox": [
231
+ 509,
232
+ 540,
233
+ 888,
234
+ 553
235
+ ],
236
+ "page_idx": 1
237
+ },
238
+ {
239
+ "type": "text",
240
+ "text": "Fig. 1 represents the hierarchical 5G enabled IoT security architecture based on distributed multi-access edge computing (MEC). This architecture uses three layers: the access layer, the MEC layer, and the cloud layer.",
241
+ "bbox": [
242
+ 509,
243
+ 560,
244
+ 919,
245
+ 619
246
+ ],
247
+ "page_idx": 1
248
+ },
249
+ {
250
+ "type": "text",
251
+ "text": "The access layer contains physical devices that collect and transfer data to the MEC layer. Physical devices could be either servers, access points, or wireless devices. In this layer, all received data are submitted in real time to gateways using the 5G network. The 5G network can provide a high-speed data transfer and a short response time; both necessary for time sensitive and critical IoT applications. The gateways are responsible for managing device connections and forwarding control messages to the corresponding devices. To ensure system scalability regarding the number of connected devices, new gateways can be dynamically activated and managed independently.",
252
+ "bbox": [
253
+ 509,
254
+ 619,
255
+ 919,
256
+ 800
257
+ ],
258
+ "page_idx": 1
259
+ },
260
+ {
261
+ "type": "text",
262
+ "text": "The MEC layer processes and analyzes the collected data; it offloads all computational tasks from devices to edge servers to address device limitations like limited computing power and high latency. MEC provides a new ecosystem where communications are rapidly performed between networks using MEC hosts. For example, MEC hosts are usually placed one block or two away from devices, hence communication latency can be",
263
+ "bbox": [
264
+ 509,
265
+ 801,
266
+ 919,
267
+ 906
268
+ ],
269
+ "page_idx": 1
270
+ },
271
+ {
272
+ "type": "image",
273
+ "img_path": "images/b2aa8b2defc22c784f8509ab63e38b1e9585d975dbcf849fb053ae672c187dff.jpg",
274
+ "image_caption": [
275
+ "Fig. 1: Architecture for 5G-enabled IoT applications: access layer to collect data from the physical environment, MEC layer to predict, detect and mitigate security attacks, and cloud layer to store the data."
276
+ ],
277
+ "image_footnote": [],
278
+ "bbox": [
279
+ 212,
280
+ 61,
281
+ 777,
282
+ 390
283
+ ],
284
+ "page_idx": 2
285
+ },
+ {
+ "type": "text",
+ "text": "low enough to support real-time applications. In this layer, 5G gateways are deployed to aggregate and process the data collected from the access layer and to provide supplementary edge services. Although MEC can process a large amount of traffic, it faces security challenges. Furthermore, sharing data among multiple devices raises the problem of data leakage and loss. For these reasons, we propose implementing a prediction and detection model (see next section) to mitigate FDIA and DDoS attacks, where a control server belonging to the MEC layer orchestrates communication, sends control messages, and updates security parameters.",
+ "bbox": [
+ 73,
+ 453,
+ 488,
+ 619
+ ],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "The cloud layer manages massive data while the MEC layer manages the real-time data generated by devices. In current clouds, a DDoS attack against a resource (e.g., storage, servers, and services) only concerns that resource, and there is a low chance that the attack affects other resources. In 5G networks, DDoS attacks may affect several resources at the same time because they could be tenants of the same shared virtualized infrastructure. A cloud shared among multiple infrastructures requires strict isolation to avoid security leakage and privacy breaches. To mitigate these issues, we propose using a virtual private cloud to limit the risk of data being compromised or altered. European security standards prohibit user data disclosure. For example, in the context of healthcare applications, the right to the security of patient medical information used by healthcare professionals must be respected, and data confidentiality must be guaranteed. A fundamental security element in the cloud is limiting access to resources and data. We propose using a cloud access control system to ensure users' security.",
+ "bbox": [
+ 75,
+ 619,
+ 488,
+ 907
+ ],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "To efficiently secure the sensitive data managed by the proposed architecture against FDIA and DDoS attacks, we propose three models: the collector model, the prediction and detection model, and the reaction model. Fig. 2 represents the three models with their components.",
+ "bbox": [
+ 504,
+ 453,
+ 921,
+ 529
+ ],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "B. Prediction and Mitigation of FDIA and DDoS Attacks in 5G Networks",
+ "text_level": 1,
+ "bbox": [
+ 504,
+ 547,
+ 919,
+ 575
+ ],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "In the following, we detail the three models used in the proposed architecture to secure 5G-enabled IoT systems against FDIA and DDoS attacks:",
+ "bbox": [
+ 504,
+ 585,
+ 921,
+ 631
+ ],
+ "page_idx": 2
+ },
+ {
+ "type": "text",
+ "text": "1) Collector Model: The gateways of the access layer ensure real-time monitoring of devices. They are responsible for gathering all data sent by devices of the access layer and for observing whether or not those devices transmit the sensed data to their destinations. This is realized through a non-selective listening mode, in which each gateway processes all data received within its communication range. In this module, we implement a cleansing process in which we correct and remove inaccurate information. In a 5G context, the data cleansing process should be performed at each gateway to ensure data quality. The ability to collect data while preserving its quality is likely to result in more accurate attack detection. Occasionally, in an IoT scenario, data records are duplicated for various reasons. To avoid system saturation, we propose removing duplicate records and keeping only representative samples; this avoids unnecessary computation during the prediction and detection of attacks.",
+ "bbox": [
+ 524,
+ 635,
+ 921,
+ 907
+ ],
+ "page_idx": 2
+ },
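To make the collector model's cleansing step concrete, here is a minimal Python sketch, assuming a simple record layout. The field names, the validity checks, and the deduplication key are illustrative choices, not the paper's specification; the paper only states that inaccurate records are corrected or removed and that duplicates are dropped in favor of representative samples.

```python
# Hypothetical sketch of the collector model's cleansing step. The Record
# fields and validity rules below are assumptions made for illustration.
from dataclasses import dataclass

@dataclass(frozen=True)
class Record:
    device_id: str
    activity: str      # "read" | "update" | "delete"
    timestamp: float   # seconds since epoch
    payload: str

VALID_ACTIVITIES = {"read", "update", "delete"}

def cleanse(records: list[Record]) -> list[Record]:
    """Drop malformed records, then keep one representative per duplicate."""
    seen: set[tuple[str, str, str]] = set()
    clean: list[Record] = []
    for r in records:
        if r.activity not in VALID_ACTIVITIES or r.timestamp <= 0:
            continue                       # remove inaccurate information
        key = (r.device_id, r.activity, r.payload)
        if key in seen:
            continue                       # remove duplicate records
        seen.add(key)
        clean.append(r)                    # keep a representative sample
    return clean
```

Deduplicating on a (device, activity, payload) key is only one plausible choice; a real gateway might hash whole records or bucket them per time window instead.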
+ {
+ "type": "image",
+ "img_path": "images/28505717ee555a1b123a59585823689521e1419a0d5559c150880db853f34b4d.jpg",
+ "image_caption": [
+ "Fig. 2: Three models for securing 5G-enabled IoT applications against FDIA and DDoS attacks: collector model, prediction and detection model, and reaction model."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 218,
+ 97,
+ 782,
+ 385
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "2) Prediction and Detection Model: Data transferred by IoT devices are pre-processed; this step consists of converting the format of the records and classifying them. Data are often transferred from different devices with heterogeneous configurations; hence, it is vital to convert these data into an understandable and unified format. To support security operations, we classify activities related to data into three categories: reading, updating, and deleting activities. This classification allows an efficient response to security threats alongside data protection. To predict and detect security attacks, we propose a mathematical model based on a Markov stochastic process, where we study the logged activity history of a device. The history log contains information about device states, users' logins, and system operations, and helps to keep track of system activities based on information and states. Investigating the log history, we can classify the security risk into three states: authentic, suspicious, and malicious. An authentic state refers to normal log behavior or a low-risk attack. Suspicious states are detected when the number of logs, i.e., actions performed in the system, is higher than a threshold but still acceptable to the system. Malicious states happen when the number of logs is higher than a maximal threshold, indicating a high security risk and possible attacks.",
+ "3) Reaction Model: Building a secure 5G-enabled IoT system requires an interconnection between all security mechanisms. For example, device authentication, access control, and data encryption should be considered together. To mitigate security attacks detected by the"
+ ],
+ "bbox": [
+ 89,
+ 453,
+ 488,
+ 906
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "prediction and detection model, we implement security mechanisms (e.g., security monitoring and limited access). Fig. 3 presents a flowchart detailing our reaction model, which is designed to set security parameters for the detection and mitigation of future attacks and to harden system resilience. First, we determine the activity's category. A deleting activity is automatically blocked, and the device that attempts it is stripped of its access privileges. Limiting such activity is important because sensitive data must not be lost; old records should be kept for future data analysis. The system recovers from this type of activity in a short time. For the two remaining activities (i.e., reading and updating), we verify the corresponding state. If it is a suspicious state, data are encrypted to protect their integrity and then sent to the cloud layer to be stored. Otherwise, data are sent to an observation state, where we evaluate the observation time spent in this step. If this time is higher than the holding time of the current state, we report a malicious behavior that can lead to an attack. The holding time is the amount of time that elapses before a state transition is made, and it depends on the activity type. The reaction to security threats is related to the state of the system and the security risk (suspicious or malicious). If the behavior is reported as harmful, we block access and monitor the performance of the attack mitigation mechanisms. Otherwise, if it is a harmless behavior, we send control messages to verify the data and update the security settings.",
+ "bbox": [
+ 544,
+ 453,
+ 921,
+ 892
+ ],
+ "page_idx": 3
+ },
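The decision logic of Fig. 3 can be sketched as a small function. This is one reading of the flowchart, not the authors' implementation: the state names, the holding-time table, and the returned action strings are assumptions.

```python
# A minimal sketch of the reaction flow described above; values are assumed.
from enum import Enum

class State(Enum):
    AUTHENTIC = "authentic"
    SUSPICIOUS = "suspicious"
    MALICIOUS = "malicious"

# Assumed per-activity holding times, in seconds.
HOLDING_TIME = {"read": 30.0, "update": 10.0}

def react(activity: str, state: State, observation_time: float) -> str:
    # Deleting activities are always blocked and access is revoked.
    if activity == "delete":
        return "block activity and revoke the device's access privileges"
    # Suspicious reading/updating: encrypt, then store in the cloud layer.
    if state is State.SUSPICIOUS:
        return "encrypt data and send it to the cloud layer"
    # Otherwise, observe: exceeding the holding time flags malicious behavior.
    if observation_time > HOLDING_TIME.get(activity, 0.0):
        state = State.MALICIOUS
    if state is State.MALICIOUS:
        return "block access and monitor attack mitigation performance"
    return "send control messages, verify data, update security settings"
```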
+ {
+ "type": "image",
+ "img_path": "images/15298fc74b44ffe2523038955fa8004dadb2ef338de0f2e17d1e4038c7ceb9e5.jpg",
+ "image_caption": [
+ "Fig. 3: Intrusion detection and reaction strategy deployment."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 212,
+ 61,
+ 782,
+ 391
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "image",
+ "img_path": "images/ff1ab48c5fb3d664021c76734134d6de8e46045d31c500777fd318a0d2f6b29f.jpg",
+ "image_caption": [
+ "Fig. 4: Device state ranges."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 138,
+ 453,
+ 424,
+ 680
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "IV. MODEL TO PREDICT AND DETECT FDIA AND DDOS ATTACKS IN 5G-ENABLED IOT",
+ "text_level": 1,
+ "bbox": [
+ 80,
+ 747,
+ 483,
+ 777
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "We present a robust stochastic model for FDIA and DDoS attack prediction and detection based on device behavior. The behavioral analysis is achieved using a Markov stochastic process. The stochastic modeling aims to introduce various security ranges with different thresholds to identify the behavior of IoT devices in the system based on their log file. The latter records all events that occurred between IoT devices and the system. We",
+ "bbox": [
+ 73,
+ 786,
+ 490,
+ 907
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "consider that each device can perform three basic elementary activities: reading, updating, and deleting. A device can generate an attack by maliciously performing one of these elementary activities. For instance, a DDoS attack occurs when a device floods the system with too many reading activities. We classify security attacks into two categories: harmless attacks and harmful attacks. A harmless attack can happen accidentally, due to device mishandling. However, a harmful attack is directed by a malicious device. For each IoT device in the network, the input to our security model is the number of activities in its log. We propose a stochastic model to identify the state of a device based on its previous evolution. We use a range-based measurement sifting policy (see Fig. 4) to represent the space of possible states of each device. We aim to study the behavior of each device and then classify it into five classes, as shown in Fig. 4: authentic, suspicious harmless, suspicious harmful, malicious harmless, and malicious harmful. The main idea of this stochastic model is to introduce five classes with six threshold values $A_{HL}, A_{HF}, S_{HL}, M_{HL}, S_{HF}$, and $M_{HF}$, corresponding to, respectively, authentic harmless, authentic harmful, suspicious harmless, malicious harmless, suspicious harmful, and malicious harmful; we can then identify the class of each device. In our work, the thresholds are fixed values calculated using the median of the number of activities in the historical log profile. For example, a device located in the suspicious harmful state at the start of a time interval can move to a malicious harmful state if the number of reported activities is higher than a threshold (i.e., $S_{HF}$). Due to space limitations, we illustrate only the case of the reading activity. For every",
+ "bbox": [
+ 504,
+ 441,
+ 924,
+ 897
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "reading activity, seven states are identified according to the log activity $A_{id}$ corresponding to a time frame, where id is the identifier of a device. The states are listed as follows:",
+ "bbox": [
+ 73,
+ 61,
+ 491,
+ 107
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "- Authentic state (i.e., Auth): the device's number of log activities is between $A_{HLR}$ and $A_{HFR}$.",
+ "- Suspicious harmless reading state (i.e., $S_{HLR}$): the device's number of log activities is between $S_{HLR}$ and $A_{HLR}$.",
+ "- Suspicious harmful reading state (i.e., $S_{HFR}$): the device's number of log activities is between $A_{HFR}$ and $S_{HFR}$.",
+ "- Malicious harmless reading state (i.e., $M_{HLR}$): the device's number of log activities is below $M_{HLR}$.",
+ "- Malicious harmful reading state (i.e., $M_{HFR}$): the device's number of log activities is above $M_{HFR}$.",
+ "- Stop state (i.e., Stop): a device is placed in this state when its log activity is within the malicious ranges $M_{HLR}$ or $M_{HFR}$.",
+ "- Observation state (i.e., Obs): a device is placed in this state if it stays for an observation time in one of the two states ($S_{HLR}$ or $S_{HFR}$)."
+ ],
+ "bbox": [
+ 89,
+ 109,
+ 486,
+ 378
+ ],
+ "page_idx": 5
+ },
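Under one plausible reading of Fig. 4, with the thresholds ordered $M_{HLR} < S_{HLR} < A_{HLR} < A_{HFR} < S_{HFR} < M_{HFR}$, the per-frame classification could look like the sketch below. The numeric thresholds are placeholders (the paper derives them from the median of the historical log profile), and the handling of boundary values between adjacent ranges is our own simplification, since the paper leaves it implicit.

```python
# Placeholder thresholds; the paper computes them from the historical log.
M_HLR, S_HLR, A_HLR, A_HFR, S_HFR, M_HFR = 5, 20, 40, 80, 120, 200

def classify_reading(a_id: int) -> str:
    """Map a device's reading count A_id for one time frame to a state."""
    if a_id < M_HLR:
        return "M_HLR"   # malicious harmless: below M_HLR
    if a_id < A_HLR:
        return "S_HLR"   # suspicious harmless: between S_HLR and A_HLR
    if a_id <= A_HFR:
        return "Auth"    # authentic: between A_HLR and A_HFR
    if a_id <= S_HFR:
        return "S_HFR"   # suspicious harmful: between A_HFR and S_HFR
    return "M_HFR"       # malicious harmful: above M_HFR (simplified)

assert classify_reading(60) == "Auth"
```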
+ {
+ "type": "text",
+ "text": "Each device can move from one state to another; this is referred to as a transition. As shown in Fig. 5, the state space is given by $S_{HLR}$, $S_{HFR}$, $M_{HLR}$, $M_{HFR}$, $S_{HLU}$, $S_{HFU}$, $M_{HLU}$, $M_{HFU}$, Auth, Obs, and Stop. We also observe that the Markov chain is irreducible and aperiodic. The evolution of the network is represented by a semi-Markov process, and the state of the network is the union of all devices' states. Furthermore, the state of a device is memoryless (i.e., given the present state, the future is independent of the past). We use a stochastic state transition matrix to represent the evolution of a device's behavior measurement over time. The matrix contains transition probabilities between states (i.e., classes according to a range-based activity sifting policy), where $P_{i\\rightarrow j}$ is the transition probability of a device from state i to state j. The transition probability is the fraction of the number of transitions of a device k from current state i to another state j over the expected number of visits to state i. Each device transition probability denotes the conditional probability that a device transits from one state to another. The Markov process is a random process, i.e., a sequence of random states with transition probabilities. For instance, a device can transit from an authentic state (Auth) to a suspicious harmless state $(S_{HL})$, and then to an observation state (Obs). However, we can notice in Fig. 5 that a device cannot transit from the Stop state to any other state. This is an absorbing state; once a device transits to it, the system is in alert mode. This means that the Markov process has determined that an attack occurred, and an intrusion alert should be reported. All remaining states are transient states.",
+ "bbox": [
+ 73,
+ 383,
+ 490,
+ 835
+ ],
+ "page_idx": 5
+ },
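A standard way to obtain such a matrix is to estimate each $P_{i\rightarrow j}$ empirically from an observed state sequence, with Stop forced to be absorbing. The sketch below illustrates the generic technique; the sample sequence is invented and this is not the authors' code.

```python
# Estimate a transition matrix from an observed per-frame state sequence.
from collections import Counter, defaultdict

def transition_matrix(sequence: list[str]) -> dict[str, dict[str, float]]:
    """P[i][j] = (# observed transitions i -> j) / (# departures from i)."""
    counts: dict[str, Counter] = defaultdict(Counter)
    for i, j in zip(sequence, sequence[1:]):
        counts[i][j] += 1
    P: dict[str, dict[str, float]] = {}
    for i, row in counts.items():
        total = sum(row.values())
        P[i] = {j: n / total for j, n in row.items()}
    P["Stop"] = {"Stop": 1.0}  # absorbing: no transition leaves Stop
    return P

P = transition_matrix(["Auth", "S_HLR", "Obs", "Auth", "S_HFR", "Stop"])
print(P["Auth"])  # {'S_HLR': 0.5, 'S_HFR': 0.5}
```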
+ {
+ "type": "text",
+ "text": "V. EVALUATION",
+ "text_level": 1,
+ "bbox": [
+ 220,
+ 843,
+ 341,
+ 857
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "In this section, we first describe the evaluation settings, and then we discuss the performance evaluation results. To verify the effectiveness of the proposed 5G-enabled IoT security",
+ "bbox": [
+ 73,
+ 862,
+ 490,
+ 907
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "solution to predict and mitigate FDIA and DDoS attacks, we performed extensive experiments. The experiments were meant to evaluate the detection rate and the number of attacks mitigated by the proposed security model [19, 20]. We simulated the proposed solution on an Intel Core™ i7-8550U machine. The experiment was conducted using the real activity log of a mobile health application available online [21], which records activities performed by the phone's sensors. Fig. 6 gives the details of the performance evaluation. In Fig. 6a, we compare the log activity of three topologies (i.e., the log with attacks, the log after attack prediction and mitigation, and the original log without any attacks). In the experiment, we generate the log under attack by randomly generating a number of FDIA and DDoS attacks during a certain time slot. The log after attack prediction and mitigation is the output of the proposed security models applied to the log with attacks. First, we inject the log under attack into the collector model to process and clean the data. Then, the prediction and detection model detects malicious behavior and predicts attacks. Finally, the reaction model performs active defense to block anomalous behaviors in the network and mitigate the detected attacks. The original log is the log extracted from the healthcare application. In Fig. 6a, we can observe that the proposed security solution mitigates all attacks after some time. Furthermore, after a period, the log after attack prediction and mitigation and the original log overlap; this demonstrates the efficiency of the proposed models in mitigating attacks.",
+ "bbox": [
+ 504,
+ 61,
+ 921,
+ 468
+ ],
+ "page_idx": 5
+ },
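As a rough sketch of this experimental setup (under assumptions: per-slot activity counts, assumed burst sizes, and a simple clipping stand-in for the full collector/detection/reaction pipeline), the attack injection could be reproduced as follows.

```python
# Inject synthetic FDIA/DDoS bursts into per-slot activity counts, then
# apply a stand-in for the security pipeline. All constants are assumed.
import random

def inject_attacks(original: list[int], n_attacks: int, seed: int = 0) -> list[int]:
    """original[t] is the number of activities in time slot t."""
    rng = random.Random(seed)
    attacked = list(original)
    for _ in range(n_attacks):
        t = rng.randrange(len(attacked))
        attacked[t] += rng.randint(50, 200)  # a burst of malicious activities
    return attacked

def mitigate(attacked: list[int], threshold: int = 100) -> list[int]:
    """Stand-in for the three models: slots above the threshold are clipped,
    mimicking the reaction model blocking anomalous behavior."""
    return [min(count, threshold) for count in attacked]

original = [30, 42, 28, 35, 40, 31]
with_attacks = inject_attacks(original, n_attacks=3)
secured = mitigate(with_attacks)  # compare against `original` as in Fig. 6a
```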
+ {
+ "type": "text",
+ "text": "In Fig. 6b, we compare the number of mitigated attacks as a function of time. To assess the reaction model and its ability to remember old attacks, we compare the secure log and the original log with the log under attack. The log with attacks is generated by reducing the number of attacks over time. From this figure, we observe that the proposed solution mitigates the maximum number of attacks. Besides, even though the original log is assumed to represent a secure system, we notice that it contains certain attacks that were not detected, whereas the proposed security solution mitigated them.",
+ "bbox": [
+ 504,
+ 470,
+ 921,
+ 619
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "In Fig. 6c, we evaluate the intrusion error rate of the security model against FDIA and DDoS attacks. We injected a fixed number of attacks into the original log and calculated the error rate. We observe that our solution is effective when the number of activities is small; the FDIA prediction error rate increases with the number of logs, but it does so slowly. With more log activities, we observe that the detection error rates for FDIA and DDoS attacks decrease and become almost equal.",
+ "bbox": [
+ 504,
+ 621,
+ 921,
+ 755
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "In Fig. 6d, we randomly generate fifty attacks and evaluate the detection rate of our security solution. We observe that as the number of log activities increases, the detection rate of the prediction and detection model increases. This figure demonstrates the scalability of our solution and its capacity to predict and detect security attacks when handling many logs.",
+ "bbox": [
+ 504,
+ 757,
+ 921,
+ 848
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "VI. DISCUSSIONS AND FUTURE DIRECTIONS",
+ "text_level": 1,
+ "bbox": [
+ 524,
+ 856,
+ 901,
+ 871
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "This section presents a discussion of the proposed security model and possible future directions. To efficiently achieve",
+ "bbox": [
+ 504,
+ 877,
+ 921,
+ 907
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "image",
+ "img_path": "images/183033d0c6d006701a27747b2eccac14b635e2cc90de926bfa2b388c3ae4e11d.jpg",
+ "image_caption": [
+ "Fig. 5: Markov chain process to secure 5G-enabled IoT applications."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 202,
+ 65,
+ 795,
+ 396
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "security and resilience to the ever-evolving threat landscape in 5G-enabled IoT networks, we proposed a security model to predict and detect malicious device behavior that can cause FDIA and DDoS attacks. To predict the occurrence of an attack in the network, the security model uses a Markov process. The prediction is based only on the network's current state, without knowledge of the log history. Indeed, the proposed model predicts attacks by a device knowing only the device's current state; it does not need the full log history file to predict an attack. This allows both a swift detection and mitigation process and overcoming the problem of exponential log growth. The Markov process is an analytical method, which means that the reliability parameter of the prediction is calculated using a probability formula. This has the considerable advantage of uncovering future attacks and bringing faster and more accurate mitigation to the network. Indeed, the early prediction of potential attacks in the network can lead to an efficient reaction to them. Furthermore, the Markov process is lightweight and does not require complex computing; it can be deployed at the edge of the network.",
+ "bbox": [
+ 73,
+ 455,
+ 488,
+ 773
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "To detect attacks, we propose tracking device activities in the network. Therefore, we proposed using a range-based activity sifting policy based on different thresholds to represent the space of possible device states. The state of a device corresponds to its current activity in the network. We consider that each device can transit from one state to another, and to identify each state we used fixed threshold values determined from the historical log profile of",
+ "bbox": [
+ 73,
+ 786,
+ 490,
+ 907
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "the device. For example, we can detect a false data injection attack when the number of activities of a device during a period is higher than the permitted thresholds. In this work, the detection of attacks is conducted with fixed threshold values. However, we consider that the determination of threshold values for each device should be integrated dynamically into the Markov process. In future work, we intend to evaluate the use of a reinforcement learning strategy to determine these threshold values for each device in the network.",
+ "bbox": [
+ 504,
+ 455,
+ 924,
+ 592
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "VII. CONCLUSION",
+ "text_level": 1,
+ "bbox": [
+ 645,
+ 606,
+ 782,
+ 619
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "In this article, we presented a hierarchical architecture for securing 5G-enabled IoT networks and a stochastic Markov model for protecting them against FDIA and DDoS attacks. The architecture includes three tiers (i.e., the access layer, the MEC layer, and the cloud layer) and implements three security models (i.e., the collector model, the prediction and detection model, and the reaction model). We have detailed the stochastic Markov detection and prediction model that mitigates FDIA and DDoS attacks by examining network device behavior. Finally, we have evaluated the performance of the proposed security solution using a healthcare application as a use case. The extensive simulation results showed a low error rate, a high detection rate, and a decrease in the number of attacks within a short period.",
+ "bbox": [
+ 504,
+ 628,
+ 923,
+ 840
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "ACKNOWLEDGEMENT",
+ "text_level": 1,
+ "bbox": [
+ 637,
+ 854,
+ 792,
+ 867
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "text",
+ "text": "The authors would like to thank the Natural Sciences and Engineering Research Council of Canada, as well as FEDER",
+ "bbox": [
+ 506,
+ 877,
+ 923,
+ 907
+ ],
+ "page_idx": 6
+ },
+ {
+ "type": "image",
+ "img_path": "images/70975bf96b70446c6fd65268dc44f1fb9ef0f6708b5db599aa83bd32071ad493.jpg",
+ "image_caption": [
+ "(a)"
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 106,
+ 88,
+ 493,
+ 229
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "image",
+ "img_path": "images/830e7dc47a48f929eea40c66a5a46c80fe6e141e0c2b59de9b8860628270278e.jpg",
+ "image_caption": [
+ "(b)"
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 516,
+ 85,
+ 898,
+ 229
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "image",
+ "img_path": "images/7e72747a24c545ce0d463d1a6d3a42b6744abe8b01c7450d867dfad21f6cc178.jpg",
+ "image_caption": [
+ "(c)",
+ "Fig. 6: Performance evaluation of a secure 5G-enabled IoT application using a Markov stochastic process: a) performance comparison of the number of activities in the log; b) performance comparison of the number of mitigated attacks in the three topologies; c) detection error of FDIA and DDoS attacks in the log after attack prediction and mitigation; d) detection rate of security attacks."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 109,
+ 260,
+ 493,
+ 402
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "image",
+ "img_path": "images/94287c0f0cffc0bec0d75e049c1deee50add11c1211018723c164dbedd0137fc.jpg",
+ "image_caption": [
+ "(d)"
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 511,
+ 260,
+ 898,
+ 402
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "text",
+ "text": "and GrandEst Region in France, for the financial support of this research.",
+ "bbox": [
+ 73,
+ 486,
+ 488,
+ 515
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "text",
+ "text": "REFERENCES",
+ "text_level": 1,
+ "bbox": [
+ 233,
+ 526,
+ 330,
+ 539
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "[1] C. Lai, R. Lu, D. Zheng, and X. S. Shen, \"Security and Privacy Challenges in 5G-Enabled Vehicular Networks,\" IEEE Network, vol. 34, no. 2, pp. 37-45, Mar. 2020.",
+ "[2] A. Abouaomar, M. Elmachkour, A. Kobbane, H. Tembine, and M. Ayaida, \"Users-Fogs association within a cache context in 5G networks: Coalition game model,\" in 2018 IEEE Symposium on Computers and Communications (ISCC), Jun. 2018, pp. 00014-00019, ISSN: 1530-1346.",
+ "[3] Anonymous, \"EU-wide coordinated risk assessment of 5G networks security,\" Oct. 2019.",
+ "[4] I. Ahmad, T. Kumar, M. Liyanage, J. Okwuibe, M. Ylianttila, and A. Gurtov, \"Overview of 5G Security Challenges and Solutions,\" IEEE Communications Standards Magazine, vol. 2, no. 1, pp. 36-43, Mar. 2018.",
+ "[5] A. Arfaoui, S. Cherkaoui, A. Kribiche, S. M. Senouci, and M. Hamdi, \"Context-Aware Adaptive Authentication and Authorization in Internet of Things,\" in ICC 2019 - 2019 IEEE International Conference on Communications (ICC), May 2019, pp. 1-6.",
+ "[6] H. Moudoud, S. Cherkaoui, and L. Khoukhi, \"An IoT Blockchain Architecture Using Oracles and Smart Contracts: the Use-Case of a Food Supply Chain,\" in 2019 IEEE 30th Annual International Symposium on Personal, Indoor and Mobile Radio Communications (PIMRC), Sep. 2019, pp. 1-6.",
+ "[7] A. Rachedi, M. H. Rehmani, S. Cherkaoui, and J. J. P. C. Rodrigues, \"IEEE Access Special Section Editorial: The Plethora of Research in Internet of Things (IoT),\" IEEE Access, vol. 4, pp. 9575-9579, 2016.",
+ "[8] E. D. Ngangue Ndih and S. Cherkaoui, \"On Enhancing Technology Coexistence in the IoT Era: ZigBee and 802.11 Case,\" IEEE Access, vol. 4, pp. 1835-1844, 2016.",
+ "[9] J. Rezgui and S. Cherkaoui, \"Detecting faulty and malicious vehicles using rule-based communications data mining,\" in 2011 IEEE 36th Conference on Local Computer Networks, Oct. 2011, pp. 827-834."
+ ],
+ "bbox": [
+ 81,
+ 542,
+ 488,
+ 906
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "[10] K. Xue, Y. Xue, J. Hong, W. Li, H. Yue, D. S. L. Wei, and P. Hong, \"RAAC: Robust and Auditable Access Control With Multiple Attribute Authorities for Public Cloud Storage,\" IEEE Transactions on Information Forensics and Security, vol. 12, no. 4, pp. 953-967, Apr. 2017.",
+ "[11] V. N. Sathi, M. Srinivasan, P. K. Thiruvasagam, and S. R. M. Chebiyyam, \"A Novel Protocol for Securing Network Slice Component Association and Slice Isolation in 5G Networks,\" in Proceedings of the 21st ACM International Conference on Modeling, Analysis and Simulation of Wireless and Mobile Systems - MSWIM '18. Montreal, QC, Canada: ACM Press, 2018, pp. 249-253.",
+ "[12] D. Sattar and A. Matrawy, \"Towards Secure Slicing: Using Slice Isolation to Mitigate DDoS Attacks on 5G Core Network Slices,\" in 2019 IEEE Conference on Communications and Network Security (CNS), Jun. 2019, pp. 82-90.",
+ "[13] M. Hashem Eiza, Q. Ni, and Q. Shi, \"Secure and Privacy-Aware Cloud-Assisted Video Reporting Service in 5G-Enabled Vehicular Networks,\" IEEE Transactions on Vehicular Technology, vol. 65, no. 10, pp. 7868-7881, Oct. 2016.",
+ "[14] S. Sicari, A. Rizzardi, and A. Coen-Porisini, \"5G In the internet of things era: An overview on security and privacy challenges,\" Computer Networks, vol. 179, p. 107345, Oct. 2020.",
+ "[15] H. Liang, J. Wu, S. Mumtaz, J. Li, X. Lin, and M. Wen, \"MBID: Micro-Blockchain-Based Geographical Dynamic Intrusion Detection for V2X,\" IEEE Communications Magazine, vol. 57, no. 10, pp. 77-83, Oct. 2019.",
+ "[16] J. Ni, X. Lin, and X. S. Shen, \"Efficient and Secure Service-Oriented Authentication Supporting Network Slicing for 5G-Enabled IoT,\" IEEE Journal on Selected Areas in Communications, vol. 36, no. 3, pp. 644-657, Mar. 2018.",
+ "[17] G. Loukas, T. Vuong, R. Heartfield, G. Sakellari, Y. Yoon, and D. Gan, \"Cloud-Based Cyber-Physical Intrusion Detection for Vehicles Using Deep Learning,\" IEEE Access, vol. 6, pp. 3491-3508, 2018.",
+ "[18] Z. Fan, Y. Xiao, A. Nayak, and C. Tan, \"An improved network security situation assessment approach in software defined networks,\" Peer-to-Peer Networking and Applications, vol. 12, no. 2, pp. 295-309, Mar. 2019. [Online]. Available: https://doi.org/10.1007/s12083-017-0604-2",
+ "[19] E. D. N. Ndih and S. Cherkaoui, \"Chapter 17 - Simulation methods,"
+ ],
+ "bbox": [
+ 508,
+ 488,
+ 921,
+ 897
+ ],
+ "page_idx": 7
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "techniques and tools of computer systems and networks,\" in Modeling and Simulation of Computer Networks and Systems, M. S. Obaidat, P. Nicopolitidis, and F. Zarai, Eds. Boston: Morgan Kaufmann, Jan. 2015, pp. 485-504.",
+ "[20] E. D. Ngangue Ndih, S. Cherkaoui, and I. Dayoub, \"Analytic Modeling of the Coexistence of IEEE 802.15.4 and IEEE 802.11 in Saturation Conditions,\" IEEE Communications Letters, vol. 19, no. 11, pp. 1981-1984, Nov. 2015.",
+ "[21] J. Zhu, S. He, J. Liu, P. He, Q. Xie, Z. Zheng, and M. R. Lyu, \"Tools and Benchmarks for Automated Log Parsing,\" arXiv:1811.03509 [cs], Jan. 2019."
+ ],
+ "bbox": [
+ 76,
+ 64,
+ 488,
+ 189
+ ],
+ "page_idx": 8
+ }
+ ]