dawnkun commited on
Commit
3a7144d
·
verified ·
1 Parent(s): 8963945

Upload 101 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +33 -0
  2. HealthRec/Benchmarks/EB_NeRD/EB-NERD_behavior.tsv +3 -0
  3. HealthRec/Benchmarks/EB_NeRD/EB_NERD_data.csv +0 -0
  4. HealthRec/Benchmarks/QB/QB_behaviour.tsv +0 -0
  5. HealthRec/Benchmarks/QB/QB_data.xlsx +3 -0
  6. HealthRec/Benchmarks/TN/TN_behaviour.tsv +0 -0
  7. HealthRec/Benchmarks/TN/TN_data.xlsx +3 -0
  8. HealthRec/Benchmarks/readme.md +32 -0
  9. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/H_neg_emb100.npy +3 -0
  10. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/H_pos_emb100.npy +3 -0
  11. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/H_reason_emb100.npy +3 -0
  12. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/H_title_emb100.npy +3 -0
  13. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/test.txt +3 -0
  14. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/train.txt +3 -0
  15. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/H_neg_emb100.npy +3 -0
  16. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/H_pos_emb100.npy +3 -0
  17. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/H_reason_emb100.npy +3 -0
  18. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/H_title_emb100.npy +3 -0
  19. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/test.txt +0 -0
  20. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/train.txt +3 -0
  21. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/H_neg_emb100.npy +3 -0
  22. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/H_pos_emb100.npy +3 -0
  23. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/H_reason_emb100.npy +3 -0
  24. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/H_title_emb100.npy +3 -0
  25. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/test.txt +0 -0
  26. HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/train.txt +3 -0
  27. HealthRec/HealthRec_code/BERT4Rec/Bert4Rec.py +763 -0
  28. HealthRec/HealthRec_code/BERT4Rec/Bert4RecHealth.py +932 -0
  29. HealthRec/HealthRec_code/BERT4Rec/environment.yml +88 -0
  30. HealthRec/HealthRec_code/GRU4Rec/dataset.py +132 -0
  31. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/H_neg_emb100.npy +3 -0
  32. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/H_pos_emb100.npy +3 -0
  33. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/H_reason_emb100.npy +3 -0
  34. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/H_title_emb100.npy +3 -0
  35. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/test.txt +3 -0
  36. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/train.txt +3 -0
  37. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/H_neg_emb100.npy +3 -0
  38. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/H_pos_emb100.npy +3 -0
  39. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/H_reason_emb100.npy +3 -0
  40. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/H_title_emb100.npy +3 -0
  41. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/test.txt +0 -0
  42. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/train.txt +3 -0
  43. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/H_neg_emb100.npy +3 -0
  44. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/H_pos_emb100.npy +3 -0
  45. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/H_reason_emb100.npy +3 -0
  46. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/H_title_emb100.npy +3 -0
  47. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/test.txt +0 -0
  48. HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/train.txt +3 -0
  49. HealthRec/HealthRec_code/GRU4Rec/gru4rec.py +59 -0
  50. HealthRec/HealthRec_code/GRU4Rec/healthRec.py +215 -0
.gitattributes CHANGED
@@ -57,3 +57,36 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ HealthRec/Benchmarks/EB_NeRD/EB-NERD_behavior.tsv filter=lfs diff=lfs merge=lfs -text
61
+ HealthRec/Benchmarks/QB/QB_data.xlsx filter=lfs diff=lfs merge=lfs -text
62
+ HealthRec/Benchmarks/TN/TN_data.xlsx filter=lfs diff=lfs merge=lfs -text
63
+ HealthRec/HealthinessMetrics/datasets/NERD/prediction_health_NERD_BERT4Rec.txt filter=lfs diff=lfs merge=lfs -text
64
+ HealthRec/HealthinessMetrics/datasets/NERD/prediction_health_NERD_BERT4Rec+.txt filter=lfs diff=lfs merge=lfs -text
65
+ HealthRec/HealthinessMetrics/datasets/NERD/prediction_health_NERD_GRU4Rec.txt filter=lfs diff=lfs merge=lfs -text
66
+ HealthRec/HealthinessMetrics/datasets/NERD/prediction_health_NERD_GRU4Rec+.txt filter=lfs diff=lfs merge=lfs -text
67
+ HealthRec/HealthinessMetrics/datasets/NERD/prediction_health_NERD_NARM.txt filter=lfs diff=lfs merge=lfs -text
68
+ HealthRec/HealthinessMetrics/datasets/NERD/prediction_health_NERD_NARM+.txt filter=lfs diff=lfs merge=lfs -text
69
+ HealthRec/HealthinessMetrics/datasets/QB/prediction_health_QB_BERT4Rec.txt filter=lfs diff=lfs merge=lfs -text
70
+ HealthRec/HealthinessMetrics/datasets/QB/prediction_health_QB_BERT4Rec+.txt filter=lfs diff=lfs merge=lfs -text
71
+ HealthRec/HealthinessMetrics/datasets/QB/prediction_health_QB_GRU4Rec.txt filter=lfs diff=lfs merge=lfs -text
72
+ HealthRec/HealthinessMetrics/datasets/QB/prediction_health_QB_GRU4Rec+.txt filter=lfs diff=lfs merge=lfs -text
73
+ HealthRec/HealthinessMetrics/datasets/QB/prediction_health_QB_NARM.txt filter=lfs diff=lfs merge=lfs -text
74
+ HealthRec/HealthinessMetrics/datasets/QB/prediction_health_QB_NARM+.txt filter=lfs diff=lfs merge=lfs -text
75
+ HealthRec/HealthinessMetrics/datasets/TN/prediction_health_TN_BERT4Rec.txt filter=lfs diff=lfs merge=lfs -text
76
+ HealthRec/HealthinessMetrics/datasets/TN/prediction_health_TN_BERT4Rec+.txt filter=lfs diff=lfs merge=lfs -text
77
+ HealthRec/HealthinessMetrics/datasets/TN/prediction_health_TN_GRU4Rec.txt filter=lfs diff=lfs merge=lfs -text
78
+ HealthRec/HealthinessMetrics/datasets/TN/prediction_health_TN_GRU4Rec+.txt filter=lfs diff=lfs merge=lfs -text
79
+ HealthRec/HealthinessMetrics/datasets/TN/prediction_health_TN_NARM.txt filter=lfs diff=lfs merge=lfs -text
80
+ HealthRec/HealthinessMetrics/datasets/TN/prediction_health_TN_NARM+.txt filter=lfs diff=lfs merge=lfs -text
81
+ HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/test.txt filter=lfs diff=lfs merge=lfs -text
82
+ HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/train.txt filter=lfs diff=lfs merge=lfs -text
83
+ HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/train.txt filter=lfs diff=lfs merge=lfs -text
84
+ HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/train.txt filter=lfs diff=lfs merge=lfs -text
85
+ HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/test.txt filter=lfs diff=lfs merge=lfs -text
86
+ HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/train.txt filter=lfs diff=lfs merge=lfs -text
87
+ HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/train.txt filter=lfs diff=lfs merge=lfs -text
88
+ HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/train.txt filter=lfs diff=lfs merge=lfs -text
89
+ HealthRec/HealthRec_code/NARM/datasetsHealth/NERD/test.txt filter=lfs diff=lfs merge=lfs -text
90
+ HealthRec/HealthRec_code/NARM/datasetsHealth/NERD/train.txt filter=lfs diff=lfs merge=lfs -text
91
+ HealthRec/HealthRec_code/NARM/datasetsHealth/QB/train.txt filter=lfs diff=lfs merge=lfs -text
92
+ HealthRec/HealthRec_code/NARM/datasetsHealth/TN/train.txt filter=lfs diff=lfs merge=lfs -text
HealthRec/Benchmarks/EB_NeRD/EB-NERD_behavior.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:40cdbabced8035877274a7be00b5d12ce437aba1e65f089606f588a7e99c084b
3
+ size 33969603
HealthRec/Benchmarks/EB_NeRD/EB_NERD_data.csv ADDED
The diff for this file is too large to render. See raw diff
 
HealthRec/Benchmarks/QB/QB_behaviour.tsv ADDED
The diff for this file is too large to render. See raw diff
 
HealthRec/Benchmarks/QB/QB_data.xlsx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:170573963f5f65f27008db20a26c15877e8c647992c02c17bd7cc2e0ade1c117
3
+ size 1140243
HealthRec/Benchmarks/TN/TN_behaviour.tsv ADDED
The diff for this file is too large to render. See raw diff
 
HealthRec/Benchmarks/TN/TN_data.xlsx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5268afb665917d3613268a451572c65cf5bda9f31c9e1c5cf60d566779a1ce03
3
+ size 735550
HealthRec/Benchmarks/readme.md ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #### Investigating Recommender Systems from the Healthiness Perspective: Benchmarks, Warnings and Enhancement
2
+
3
+ This repository contains the constructed datasets TN, QB and EB-NeRD for our work "Investigating Recommender Systems from the Healthiness Perspective: Benchmarks, Warnings and Enhancement", which investigates recommender systems from a healthiness perspective.
4
+
5
+ These three datasets are enriched from existing content recommendation datasets, where we add healthiness-related information into the original datasets, including healthiness tag (healthy or harmful), and corresponding reasons that provide justifications for the assigned tags.
6
+
7
+ #### Item content information
8
+
9
+ TN_data.xlsx, QB_data.xlsx and EB_NERD_data.csv contain the item content information, which mainly includes:
10
+
11
+ asin: original item ID.
12
+
13
+ tag: A binary label that classifies content as either healthy or harmful. 0 indicates harmful, and 1 indicates healthy.
14
+
15
+ reason: A textual explanation that provides the detailed reason supporting the healthiness classification.
16
+
17
+ title: item title information.
18
+
19
+ title_english: English title information.
20
+
21
+ abstract: item abstract information.
22
+
23
+ abstract_english: English abstract information.
24
+
25
+ #### User behavior data
26
+
27
+ TN_behaviour.tsv, QB_behaviour.tsv and EB-NERD_behavior.tsv are the user behavior data, i.e., user–item interaction records, in the following format:
28
+
29
+ User_ID, Item_ID, Item_ID, ..., Item_ID
30
+
31
+ Each line indicates that the given user (User_ID) has interacted with the listed items.
32
+
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/H_neg_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f8abe18978a3deec718b230960d48084285fdf16ef777f645fe9d3fa2ab77b9
3
+ size 3200
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/H_pos_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dde3f20ab1ee7b4d323c8c8a8af58ef19148a2f439b67c675c004777b6274a6
3
+ size 3200
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/H_reason_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8758eb8e206047fbdf707cf36202577d679e62b5ec4cc7a54d15c695fedefa2b
3
+ size 8263328
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/H_title_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5706ac3c6e8273608204edde9ff8df523719c5bfbf7d79d8d49044d63689d1c6
3
+ size 8263328
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/test.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1af0f054e9fe068851e078f4946ae0bcbe363f7d215ace4d175ade5f9c1facaa
3
+ size 711525
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/NERD/train.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01030665e746b1da26a4cfe598e5cbba77e09db14cc8f694f0b282dc0cb341a7
3
+ size 6429180
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/H_neg_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f8abe18978a3deec718b230960d48084285fdf16ef777f645fe9d3fa2ab77b9
3
+ size 3200
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/H_pos_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dde3f20ab1ee7b4d323c8c8a8af58ef19148a2f439b67c675c004777b6274a6
3
+ size 3200
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/H_reason_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e48330dbc4c2fca6d95ac7e17a181e5824bb602cf2c93996670642254eee26a5
3
+ size 4751328
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/H_title_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6adfa8e769d49b084bb341b2f4c9b7af25c25beb9fb79e5eaeab46a134b8a646
3
+ size 4751328
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/test.txt ADDED
Binary file (59.1 kB). View file
 
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/QB/train.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f42bce183da5fad8fe7479527e80bb4749611122fc39657393173a65744921d8
3
+ size 526672
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/H_neg_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f8abe18978a3deec718b230960d48084285fdf16ef777f645fe9d3fa2ab77b9
3
+ size 3200
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/H_pos_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dde3f20ab1ee7b4d323c8c8a8af58ef19148a2f439b67c675c004777b6274a6
3
+ size 3200
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/H_reason_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bfb9acc77f001da4f778233a7713e34355df5b809841dff3a5229b2e0815f53b
3
+ size 2604128
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/H_title_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:818d7d4cb5d306c1d8d6ceb4b13efee47d2686e5d6d8a71df44ce89fc5ffe658
3
+ size 2604128
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/test.txt ADDED
Binary file (53.6 kB). View file
 
HealthRec/HealthRec_code/BERT4Rec/BERTHealth/TN/train.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6046fc536c91e604d7e5fcc0aa7fd4f8c0dd2e5026a35bf007e32d465203e6a4
3
+ size 520159
HealthRec/HealthRec_code/BERT4Rec/Bert4Rec.py ADDED
@@ -0,0 +1,763 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+ import torch.nn as nn
4
+ from torch.optim.optimizer import Optimizer
5
+ import math
6
+ import random
7
+ import numpy as np
8
+ import pandas as pd
9
+ from torch.utils.data import Dataset
10
+ import tqdm
11
+ from matplotlib import pyplot as plt
12
+ import torch.backends.cudnn as cudnn
13
+ from copy import deepcopy
14
+ import os
15
+ import datetime
16
+ import pickle
17
+
18
+
19
+
20
+ # seed = 1
21
+ # random.seed(seed)
22
+ # torch.manual_seed(seed)
23
+ # torch.cuda.manual_seed_all(seed)
24
+ # np.random.seed(seed)
25
+ cudnn.deterministic = True
26
+ cudnn.benchmark = False
27
+ device = torch.device("cuda")
28
+ # device = torch.device("cpu")
29
+
30
+ session_length = 20
31
+ batch_size = 512 #512
32
+ plot_num = 5000
33
+ epochs = 30
34
+
35
+
36
class SessionData(object):
    """A single user session: an ordered list of item indexes plus its ids.

    Attributes:
        session_index: integer index assigned by the dataset loader.
        session_id: original session/user identifier from the raw data.
        item_list: ordered list of (already remapped) item indexes.
    """

    def __init__(self, session_index, session_id, items_indexes):
        self.session_index = session_index
        self.session_id = session_id
        self.item_list = items_indexes

    def generate_seq_datas(self, session_length, padding_idx=0, predict_length=1):
        """Unroll the session into fixed-length (input, target) training rows.

        Each row holds `session_length` input item indexes (left-padded with
        `padding_idx`) followed by the next item as the prediction target,
        i.e. every row has length session_length + 1.

        Returns:
            (session_index, rows); `rows` is empty when predict_length != 1
            (multi-step prediction is not implemented).
        """
        sessions = []
        if len(self.item_list) < 2:
            # A session needs at least one (input, target) pair; duplicate the
            # single item so it predicts itself.
            # BUG FIX: the original used `append[...]` (subscripting the bound
            # method), which raised TypeError for one-item sessions.
            self.item_list.append(self.item_list[0])
        if predict_length == 1:
            for i in range(len(self.item_list) - 1):
                if i < session_length:
                    # Not enough history yet: left-pad with the padding index.
                    # FIX: honor the padding_idx argument (original hard-coded 0,
                    # identical for the default padding_idx=0).
                    train_data = [padding_idx for _ in range(session_length - i - 1)]
                    train_data.extend(self.item_list[:i + 1])
                    train_data.append(self.item_list[i + 1])
                else:
                    # Sliding window over the most recent `session_length` items.
                    train_data = self.item_list[i + 1 - session_length:i + 1]
                    train_data.append(self.item_list[i + 1])
                sessions.append(train_data)
        else:
            pass
        return self.session_index, sessions

    def __str__(self):
        info = " session index = {}\n session id = {} \n the length of item list= {} \n the fisrt item index in item list is {}".format(
            self.session_index, self.session_id, len(self.item_list), self.item_list[0])
        return info
64
+
65
+
66
class SessionDataSet(object):
    """Session dataset holder for train/test splits.

    Loads two pickled files, each a triple (session_ids, session_inputs,
    session_labels), remaps raw item ids to contiguous 1-based indexes
    (index 0 is reserved for "<pad>"), groups rows into SessionData objects,
    and serves padded mini-batches with optional negative sampling.
    """

    def __init__(self, train_file, test_file, padding_idx=0):
        super(SessionDataSet, self).__init__()
        # Running counters shared by both splits so item/session indexes are
        # globally unique across train and test.
        self.index_count = 0
        self.session_count = 0
        self.train_count = 0
        self.test_count = 0
        self.max_session_length = 0

        self.padding_idx = padding_idx
        self.item2index = dict()        # raw item id -> contiguous index
        self.index2item = dict()        # contiguous index -> raw item id
        self.session2index = dict()     # raw session id -> session index
        self.index2session = dict()     # session index -> raw session id
        self.item_total_num = dict()    # item index -> interaction count
        self.item2index["<pad>"] = padding_idx
        self.index2item[padding_idx] = "<pad>"
        self.train_data = self.load_data(train_file)
        print("training set is loaded, # index: ", len(self.item2index.keys()))
        self.train_count = self.session_count
        print("train_session_num", self.train_count)
        self.test_data = self.load_data(test_file)
        print("testing set is loaded, # index: ", len(self.index2item.keys()))
        print("# item", self.index_count)
        self.test_count = self.session_count - self.train_count
        print("# test session:", self.test_count)
        # Caches for the unrolled fixed-length samples (filled lazily).
        self.all_training_data = []
        self.all_testing_data = []
        self.all_meta_training_data = []
        self.all_meta_testing_data = []
        self.train_session_length = 0
        self.test_session_length = 0

    def load_data(self, file_path):
        """Read one pickled split and return a list of SessionData objects.

        The pickle holds parallel lists: data[0] session ids, data[1] item-id
        sequences, data[2] target items. Consecutive rows sharing a session id
        are collapsed into one session.

        NOTE(review): the file handle from open() is never closed, and the
        first row is processed both here and again by the loop below —
        presumably the loop's `continue` branch makes this equivalent; verify.
        """
        data = pickle.load(open(file_path, 'rb'))
        session_ids = data[0]
        session_data = data[1]
        session_label = data[2]

        result_data = []
        lenth = len(session_ids)
        print("# session", lenth)

        last_session_id = session_ids[0]

        session_item_indexes = []

        # Seed the running session with the first row's items + target.
        for item_id in session_data[0]:
            if item_id not in self.item2index.keys():
                self.index_count += 1
                self.item2index[item_id] = self.index_count
                self.index2item[self.index_count] = item_id
                self.item_total_num[self.index_count] = 0
            session_item_indexes.append(self.item2index[item_id])
            self.item_total_num[self.item2index[item_id]] += 1
        target_item = session_label[0]
        if target_item not in self.item2index.keys():
            self.index_count += 1
            self.item2index[target_item] = self.index_count
            self.index2item[self.index_count] = target_item
            self.item_total_num[self.index_count] = 0
        session_item_indexes.append(self.item2index[target_item])
        self.item_total_num[self.item2index[target_item]] += 1

        for session_id, items, target_item in zip(session_ids, session_data, session_label):
            if session_id != last_session_id:
                # Session boundary: flush the accumulated session, then start
                # a new one from this row.
                self.session_count += 1
                self.session2index[last_session_id] = self.session_count
                self.index2session[self.session_count] = last_session_id
                if len(session_item_indexes) > self.max_session_length:
                    self.max_session_length = len(session_item_indexes)
                new_session = SessionData(self.session_count, last_session_id, session_item_indexes)
                result_data.append(new_session)
                last_session_id = session_id
                session_item_indexes = []
                for item_id in items:
                    if item_id not in self.item2index.keys():
                        self.index_count += 1
                        self.item2index[item_id] = self.index_count
                        self.index2item[self.index_count] = item_id
                        self.item_total_num[self.index_count] = 0
                    session_item_indexes.append(self.item2index[item_id])
                    self.item_total_num[self.item2index[item_id]] += 1
                if target_item not in self.item2index.keys():
                    self.index_count += 1
                    self.item2index[target_item] = self.index_count
                    self.index2item[self.index_count] = target_item
                    self.item_total_num[self.index_count] = 0
                session_item_indexes.append(self.item2index[target_item])
                self.item_total_num[self.item2index[target_item]] += 1
            else:
                continue

        # Flush the final session.
        self.session_count += 1
        self.session2index[last_session_id] = self.session_count
        new_session = SessionData(self.session_count, last_session_id, session_item_indexes)
        result_data.append(new_session)
        print("loaded")
        print(new_session)

        return result_data

    def get_batch(self, batch_size, session_length=10, predict_length=1, all_data=None, phase="train", neg_num=1,
                  sampling_mathod="random"):
        """Yield mini-batches of unrolled session rows.

        Train phase: shuffles and yields [inputs, targets, negatives] via
        divid_and_extend_negative_samples. Test phase: yields
        [inputs, targets] without negatives. The trailing partial batch is
        always yielded.
        """
        if phase == "train":
            if all_data is None:
                all_data = self.get_all_training_data(session_length)
            indexes = np.random.permutation(all_data.shape[0])
            all_data = all_data[indexes]
        else:
            if all_data is None:
                all_data = self.get_all_testing_data(session_length)

        sindex = 0
        eindex = batch_size
        while eindex < all_data.shape[0]:
            batch = all_data[sindex: eindex]

            temp = eindex
            eindex = eindex + batch_size
            sindex = temp
            if phase == "train":
                batch = self.divid_and_extend_negative_samples(batch, session_length=session_length,
                                                               predict_length=predict_length, neg_num=neg_num,
                                                               method=sampling_mathod)
            else:
                batch = [batch[:, :session_length], batch[:, session_length:]]
            yield batch

        if eindex >= all_data.shape[0]:
            # Last (possibly smaller) batch.
            batch = all_data[sindex:]
            if phase == "train":
                batch = self.divid_and_extend_negative_samples(batch, session_length=session_length,
                                                               predict_length=predict_length, neg_num=neg_num,
                                                               method=sampling_mathod)
            else:
                batch = [batch[:, :session_length], batch[:, session_length:]]
            yield batch

    def get_batch_with_neg(self, batch_size, session_length=10, predict_length=1, all_data=None, phase="train",
                           neg_num=1, sampling_mathod="random"):
        """Yield batches from pre-built arrays that already contain negatives.

        Rows are laid out as [inputs | targets | negatives]; the last neg_num
        columns are the negative samples.

        NOTE(review): get_all_training_data_with_neg /
        get_all_testing_data_with_neg are not defined in this class as shown —
        confirm they exist elsewhere in the file, otherwise this raises
        AttributeError. The `all_data` argument is also ignored here.
        """
        if phase == "train":
            all_data = self.get_all_training_data_with_neg(session_length, neg_num)
            indexes = np.random.permutation(all_data.shape[0])
            all_data = all_data[indexes]
        else:
            all_data = self.get_all_testing_data_with_neg(session_length, neg_num)

        sindex = 0
        eindex = batch_size
        while eindex < all_data.shape[0]:
            batch = all_data[sindex: eindex]

            temp = eindex
            eindex = eindex + batch_size
            sindex = temp
            if phase == "train":
                batch = [batch[:, :session_length], batch[:, session_length:session_length + predict_length],
                         batch[:, -neg_num:]]
            else:
                batch = [batch[:, :session_length], batch[:, session_length:]]
            yield batch

        if eindex >= all_data.shape[0]:
            batch = all_data[sindex:]
            if phase == "train":
                batch = [batch[:, :session_length], batch[:, session_length:session_length + predict_length],
                         batch[:, -neg_num:]]
            else:
                batch = [batch[:, :session_length], batch[:, session_length:]]
            yield batch

    def get_batch_tasks_with_neg(self, batch_size, session_length=10, predict_length=1, all_data=None, phase="train",
                                 neg_num=1, sampling_mathod="random"):
        """Yield batches of meta-learning tasks, each split into
        [session_items, target_item, neg_item] lists (one entry per task).

        NOTE(review): get_all_meta_training_data_with_neg /
        get_all_meta_testing_data_with_neg are not defined in this class as
        shown — confirm they exist elsewhere in the file.
        """
        if phase == "train":
            all_data = self.get_all_meta_training_data_with_neg(session_length, neg_num)
            random.shuffle(all_data)
        else:
            all_data = self.get_all_meta_testing_data_with_neg(session_length, neg_num)
        sindex = 0
        eindex = batch_size
        while eindex < len(all_data):
            batch = all_data[sindex: eindex]

            temp = eindex
            eindex = eindex + batch_size
            sindex = temp

            session_items = [batch[i][:, :session_length] for i in range(len(batch))]

            target_item = [batch[i][:, session_length:session_length + predict_length] for i in range(len(batch))]

            neg_item = [batch[i][:, -neg_num:] for i in range(len(batch))]
            batch = [session_items, target_item, neg_item]
            yield batch

        if eindex >= len(all_data):
            batch = all_data[sindex:]
            session_items = [batch[i][:, :session_length] for i in range(len(batch))]

            target_item = [batch[i][:, session_length:session_length + predict_length] for i in range(len(batch))]

            neg_item = [batch[i][:, -neg_num:] for i in range(len(batch))]
            batch = [session_items, target_item, neg_item]
            yield batch

    def divid_and_extend_negative_samples(self, batch_data, session_length, predict_length=1, neg_num=1,
                                          method="random"):
        """
        divid and extend negative samples

        Split batch rows into [inputs, targets] and draw `neg_num` negatives
        per row. "random" draws uniform item indexes not present in the row.

        NOTE(review): the non-"random" (popularity) branch is unfinished — it
        sorts candidates by frequency but discards the np.random.choice result,
        leaving neg_items empty.
        """
        neg_items = []
        if method == "random":
            for session_and_target in batch_data:
                neg_item = []
                for i in range(neg_num):
                    # Rejection-sample an item index not already in the row.
                    rand_item = random.randint(1, self.index_count)
                    while rand_item in session_and_target or rand_item in neg_item:
                        rand_item = random.randint(1, self.index_count)
                    neg_item.append(rand_item)
                neg_items.append(neg_item)
        else:
            total_list = set()
            for session in batch_data:
                for i in session:
                    total_list.add(i)
            total_list = list(total_list)
            total_list = sorted(total_list, key=lambda item: self.item_total_num[item], reverse=True)
            for i, session in enumerate(batch_data):
                np.random.choice(total_list)
        session_items = batch_data[:, :session_length]
        target_item = batch_data[:, session_length:]
        neg_items = np.array(neg_items)
        return [session_items, target_item, neg_items]

    def get_all_training_data(self, session_length, predict_length=1):
        """Build (or return cached) array of unrolled training rows,
        shape (num_samples, session_length + 1)."""
        if len(self.all_training_data) != 0 and self.train_session_length == session_length:
            return self.all_training_data
        print("Start building the all training dataset")
        all_sessions = []
        for session_data in self.train_data:
            session_index, sessions = session_data.generate_seq_datas(session_length, padding_idx=self.padding_idx)
            if sessions is not None:
                all_sessions.extend(sessions)
        all_sessions = np.array(all_sessions)
        self.all_training_data = all_sessions
        self.train_session_length = session_length
        print("The total number of training samples is", all_sessions.shape)
        return all_sessions

    def get_all_testing_data(self, session_length, predict_length=1):
        """Build (or return cached) array of unrolled testing rows,
        shape (num_samples, session_length + 1)."""
        if len(self.all_testing_data) != 0 and self.test_session_length == session_length:
            return self.all_testing_data
        all_sessions = []
        for session_data in self.test_data:
            session_index, sessions = session_data.generate_seq_datas(session_length, padding_idx=self.padding_idx)
            if sessions is not None:
                all_sessions.extend(sessions)
        all_sessions = np.array(all_sessions)
        self.all_testing_data = all_sessions
        self.test_session_length = session_length
        print("The total number of testing samples is", all_sessions.shape)
        return all_sessions

    def __getitem__(self, idx):
        # Not implemented; batching goes through the get_batch* generators.
        pass

    def __len__(self):
        # Not implemented.
        pass
338
+
339
+
340
def bpr_loss(r):
    """BPR (Bayesian Personalized Ranking) loss: -sum(log(sigmoid(r))).

    r: tensor of score differences (positive minus negative item scores).
    Returns a scalar tensor.
    """
    return -torch.log(torch.sigmoid(r)).sum()
342
+
343
+
344
def get_hit_num(pred, y_truth):
    """
    pred: numpy type(batch_size,k)
    y_truth: list type (batch_size,groudtruth_num)

    Count how many times ground-truth items occur in the predicted lists
    (each occurrence in a row counts once).
    """
    hit_num = 0
    for row, truths in zip(pred, y_truth):
        for value in truths:
            hit_num += np.sum(row == value)
    return hit_num
355
+
356
+
357
def get_rr(pred, y_truth):
    """Sum of reciprocal ranks of every ground-truth hit in the batch.

    A hit at 0-based position p contributes 1 / (p + 1).
    """
    total = 0.
    for row, truths in zip(pred, y_truth):
        for value in truths:
            for position in np.where(row == value)[0]:
                total += 1 / (position + 1)
    return total
365
+
366
+
367
def get_dcg(pred, y_truth):
    """Per-row DCG of the predicted rankings against the FIRST ground-truth
    item of each row (only y_truth[i][0] is considered; binary relevance).

    Returns a 1-D array of length len(y_truth).
    """
    relevance = np.zeros_like(pred)
    for row in range(len(y_truth)):
        relevance[row][pred[row] == y_truth[row][0]] = 1
    gains = 2 ** relevance - 1
    # Discount 1/log2(rank + 2), identical for every row.
    discounts = np.tile(np.log2(np.arange(pred.shape[1]) + 2), (len(y_truth), 1))
    return np.sum(gains / discounts, axis=1)
379
+
380
+
381
def get_ndcg(pred, y_truth):
    """Sum of per-row NDCG values (DCG normalized by the ideal DCG).

    The ideal ranking places the ground-truth item first, padded with -1
    (an index that never matches) to the prediction width.
    """
    actual = get_dcg(pred, y_truth)
    padding = np.zeros_like(pred)[:, :-1] - 1
    ideal_ranking = np.concatenate((y_truth, padding), axis=1)
    ideal = get_dcg(ideal_ranking, y_truth)
    return np.sum(actual / ideal)
387
+
388
+
389
def dcg_score(y_pre, y_true, k):
    """DCG@k of one predicted list against a ground-truth list
    (binary relevance: 1 if the predicted item is in y_true)."""
    ranking = y_pre[:k] if len(y_pre) > k else y_pre
    scores = np.zeros(k)
    for rank, tag in enumerate(ranking):
        if tag in y_true:
            scores[rank] = 1
    discounts = np.log2(np.arange(k) + 2)
    return np.sum((2 ** scores - 1) / discounts)
400
+
401
+
402
def ndcg_score(y_pre, y_true, k=5):
    """NDCG@k: DCG of the prediction divided by the ideal DCG
    (the ground truth ranked against itself)."""
    actual = dcg_score(y_pre, y_true, k)
    ideal = dcg_score(y_true, y_true, k)
    return actual / ideal
406
+
407
+
408
# Shared classification criterion: softmax cross-entropy over item logits.
loss_function = torch.nn.CrossEntropyLoss()
409
+
410
+
411
class MultiHeadSelfAttention(torch.nn.Module):
    """Multi-head self-attention over a sequence of hidden states.

    Splits the hidden dimension into `head_num` heads via chunking, computes
    scaled dot-product attention per head, concatenates the heads, and
    projects with `concat_weight`. Uses the module-level global `device`
    when masking.
    """

    def __init__(self, hidden_size, activate="relu", head_num=2, dropout=0, initializer_range=0.02):
        # `activate` is accepted but unused in this class.
        super(MultiHeadSelfAttention, self).__init__()
        self.config = list()

        self.hidden_size = hidden_size

        self.head_num = head_num
        # hidden_size must divide evenly across heads.
        if (self.hidden_size) % head_num != 0:
            raise ValueError(self.head_num, "error")
        self.head_dim = self.hidden_size // self.head_num

        self.query = torch.nn.Linear(self.hidden_size, self.hidden_size)
        self.key = torch.nn.Linear(self.hidden_size, self.hidden_size)
        self.value = torch.nn.Linear(self.hidden_size, self.hidden_size)
        self.concat_weight = torch.nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        # BERT-style init: weights ~ N(0, initializer_range); biases keep
        # PyTorch's default initialization.
        torch.nn.init.normal_(self.query.weight, 0, initializer_range)
        torch.nn.init.normal_(self.key.weight, 0, initializer_range)
        torch.nn.init.normal_(self.value.weight, 0, initializer_range)
        torch.nn.init.normal_(self.concat_weight.weight, 0, initializer_range)
        self.dropout = torch.nn.Dropout(dropout)

    def dot_score(self, encoder_output):
        """Scaled dot-product attention energies for every head."""
        query = self.dropout(self.query(encoder_output))
        key = self.dropout(self.key(encoder_output))
        # head_num * batch_size * session_length * head_dim
        querys = torch.stack(query.chunk(self.head_num, -1), 0)
        keys = torch.stack(key.chunk(self.head_num, -1), 0)
        # head_num * batch_size * session_length * session_length
        dots = querys.matmul(keys.permute(0, 1, 3, 2)) / torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float))
        # print(len(dots),dots[0].shape)
        return dots

    def forward(self, encoder_outputs, mask=None):
        """Apply self-attention; `mask` marks padding positions to exclude.

        NOTE(review): `mask` appears to be (batch, seq) with 1 = padded /
        0 = valid, given the (1 - mask) outer-product construction below —
        confirm against callers.
        """
        attention_energies = self.dot_score(encoder_outputs)
        value = self.dropout(self.value(encoder_outputs))

        values = torch.stack(value.chunk(self.head_num, -1))

        if mask is not None:
            # Build a pairwise mask: positions where either token is padded
            # are blocked, but the diagonal (self-attention) stays open.
            eye = torch.eye(mask.shape[-1]).to(device)
            new_mask = torch.clamp_max((1 - (1 - mask.float()).unsqueeze(1).permute(0, 2, 1).bmm(
                (1 - mask.float()).unsqueeze(1))) + eye, 1)
            # Large negative bias drives masked energies to ~0 after softmax;
            # the extra multiply zeroes any residual weight.
            attention_energies = attention_energies - new_mask * 1e12
            weights = F.softmax(attention_energies, dim=-1)
            weights = weights * (1 - new_mask)
        else:
            weights = F.softmax(attention_energies, dim=2)

        # head_num * batch_size * session_length * head_dim
        outputs = weights.matmul(values)
        # batch_size * session_length * hidden_size
        outputs = torch.cat([outputs[i] for i in range(outputs.shape[0])], dim=-1)
        outputs = self.dropout(self.concat_weight(outputs))

        return outputs
467
+
468
+
469
class PositionWiseFeedForward(torch.nn.Module):
    """Transformer position-wise feed-forward network.

    Linear -> GELU -> Linear, with the inner layer four times as wide as the
    model dimension; applied independently to every position.
    """

    def __init__(self, hidden_size, initializer_range=0.02):
        super(PositionWiseFeedForward, self).__init__()
        inner_size = hidden_size * 4
        self.final1 = torch.nn.Linear(hidden_size, inner_size, bias=True)
        self.final2 = torch.nn.Linear(inner_size, hidden_size, bias=True)
        for layer in (self.final1, self.final2):
            torch.nn.init.normal_(layer.weight, 0, initializer_range)

    def forward(self, x):
        """Apply the two-layer transform token-wise."""
        return self.final2(F.gelu(self.final1(x)))
+
482
+
483
class TransformerLayer(torch.nn.Module):
    """One post-norm Transformer encoder block: multi-head self-attention and
    a position-wise FFN, each wrapped in residual + LayerNorm.

    Note: a single LayerNorm instance is shared by both sub-layers (kept from
    the original design), so its affine parameters are tied across the two
    residual branches.
    """

    def __init__(self, hidden_size, activate="relu", head_num=2, dropout=0, attention_dropout=0,
                 initializer_range=0.02):
        super(TransformerLayer, self).__init__()
        self.mh = MultiHeadSelfAttention(hidden_size=hidden_size, activate=activate, head_num=head_num,
                                         dropout=attention_dropout, initializer_range=initializer_range)
        self.pffn = PositionWiseFeedForward(hidden_size, initializer_range=initializer_range)
        self.layer_norm = torch.nn.LayerNorm(hidden_size)
        # FIX: the original assigned self.dropout twice with identical
        # arguments; a single assignment is equivalent.
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, encoder_outputs, mask=None):
        """Apply attention then FFN, each with residual + shared LayerNorm."""
        encoder_outputs = self.layer_norm(encoder_outputs + self.dropout(self.mh(encoder_outputs, mask)))
        encoder_outputs = self.layer_norm(encoder_outputs + self.dropout(self.pffn(encoder_outputs)))
        return encoder_outputs
+
499
+
500
class BERT(torch.nn.Module):
    """Stack of ``sa_layer_num`` TransformerLayer blocks — the encoder of
    BERT4Rec.

    NOTE(review): ``self.session_length`` is read from the module-level
    global ``session_length``, not from a constructor argument, and the
    itemNum/posNum/padding_idx parameters are unused here — confirm this is
    intentional.
    """

    def __init__(self, hidden_size=100, itemNum=0, posNum=0, padding_idx=0, dropout=0.5, attention_dropout=0,
                 head_num=2, sa_layer_num=1,
                 activate="relu", initializer_range=0.02):
        super(BERT, self).__init__()
        self.hidden_size = hidden_size
        self.head_num = head_num
        # Taken from the module-level global, not a parameter.
        self.session_length = session_length
        self.sa_layer_num = sa_layer_num
        self.transformers = torch.nn.ModuleList([TransformerLayer(hidden_size, head_num=head_num, dropout=dropout,
                                                                  attention_dropout=attention_dropout,
                                                                  initializer_range=initializer_range) for _ in
                                                 range(sa_layer_num)])

    def forward(self, compute_output, attention_mask):
        """Run the input through every transformer layer in order."""
        for sa_i in range(self.sa_layer_num):
            compute_output = self.transformers[sa_i](compute_output, attention_mask)
        return compute_output
+
519
+
520
class BERT4Rec(torch.nn.Module):
    """BERT4Rec: bidirectional transformer for sequential recommendation.

    Vocabulary layout of ``item_embedding`` (itemNum + 2 rows):
      row 0               -> padding
      rows 1..itemNum     -> real items
      row itemNum + 1     -> the [MASK] token
    The output layer is weight-tied: logits are the dot product of the
    encoder output with ``item_embedding.weight[1:-1]`` (real items only),
    plus a learned per-item bias — so logit index i corresponds to item
    index i + 1.

    NOTE(review): uses the module-level globals ``session_length`` and
    ``device``.
    """

    def __init__(self, hidden_size=64, itemNum=0, posNum=0, padding_idx=0, dropout=0.5, attention_dropout=0, head_num=2,
                 sa_layer_num=1,
                 activate="relu", initializer_range=0.02):
        super(BERT4Rec, self).__init__()
        self.padding_idx = padding_idx
        self.hidden_size = hidden_size
        self.head_num = head_num
        self.session_length = session_length
        self.sa_layer_num = sa_layer_num
        self.activate = torch.relu
        self.dropout = torch.nn.Dropout(dropout)

        # Reserved indices for the [MASK] token / position.
        self.mask_index = torch.tensor(itemNum + 1).to(device)
        self.mask_position = torch.tensor(posNum + 1).to(device)
        self.item_embedding = torch.nn.Embedding(itemNum + 2, hidden_size, padding_idx=self.padding_idx)
        self.position_embedding = torch.nn.Embedding(posNum + 2, hidden_size, padding_idx=self.padding_idx)
        self.bert = BERT(hidden_size=hidden_size, dropout=dropout, attention_dropout=attention_dropout,
                         head_num=head_num, sa_layer_num=sa_layer_num,
                         activate=activate, initializer_range=initializer_range)

        # Optional initialisation from pre-computed text embeddings (disabled).
        # text_emb_path = './BERTHealth/QB/H_title_emb100.npy'
        # textWeights = np.load(text_emb_path)
        # self.item_embedding.weight.data.copy_(torch.from_numpy(textWeights))

        torch.nn.init.normal_(self.item_embedding.weight, 0, initializer_range)
        # Keep the padding rows at exactly zero after the normal init.
        torch.nn.init.constant_(self.item_embedding.weight[0], 0)
        torch.nn.init.normal_(self.position_embedding.weight, 0, initializer_range)
        torch.nn.init.constant_(self.position_embedding.weight[0], 0)
        self.projection = torch.nn.Linear(hidden_size, hidden_size, bias=True)
        torch.nn.init.normal_(self.projection.weight, 0, initializer_range)
        # One bias per real item (matches the weight[1:-1] output slice).
        self.output_bias = torch.nn.Parameter(torch.zeros(itemNum, ))
        self.layer_norm = torch.nn.LayerNorm(hidden_size)

    def forward(self, session, mask_indexes=None):
        """Return logits over real items.

        session: (batch, seq_len) item-index tensor, 0 = padding.
        mask_indexes: optional (batch, 1) positions to read out; when None,
        the LAST position (where the caller appended the [MASK] token) is
        used.
        """
        # Zero-out embeddings at padded positions.
        mask = (session != 0).float()

        mask = mask.unsqueeze(2).repeat((1, 1, self.hidden_size))
        session_item_embeddings = self.item_embedding(session) * mask
        positions = torch.arange(0, session.shape[1]).unsqueeze(0).repeat((session.shape[0], 1)).to(device)
        session_position_embeddings = self.position_embedding(positions) * mask
        session_item_vecs = self.dropout(self.layer_norm(session_item_embeddings + session_position_embeddings))
        # Boolean mask, True at padding, consumed by the attention layers.
        attention_mask = (session == self.padding_idx)
        if mask_indexes is not None:
            compute_output = self.dropout(self.bert(session_item_vecs, attention_mask).gather(1, mask_indexes))
        else:
            compute_output = self.dropout(self.bert(session_item_vecs, attention_mask)[:, -1, :])
        compute_output = F.gelu(self.dropout(self.projection(compute_output)))
        # Weight-tied output over real items only (rows 1..itemNum).
        result = torch.matmul(compute_output, self.item_embedding.weight[1:-1].t()) + self.output_bias
        return result

    def predict_top_k(self, session, k=20):
        """Return the indices of the k highest-scoring items per session
        (logit indices; callers add 1 to recover item ids)."""
        result = self.forward(session)
        result = torch.topk(result, k, dim=1)[1]

        return result
+
578
+
579
+
580
+
581
+
582
def train(args):
    """Train a BERT4Rec model with the hyper-parameters in ``args`` and
    evaluate it on the test split after every epoch.

    args: dict of optional hyper-parameters (hidden_size, attention_dropout,
        dropout, lr, sa_layer_num, amsgrad, session_length, head_num,
        patience); missing keys fall back to defaults.

    Returns ``(best_model, best_hr, best_mrr, best_ndcg)`` where the metric
    values are lists indexed by the cut-offs [1, 5, 10, 20]. Model selection
    maximises HR@20 + MRR@20, with early stopping after ``patience`` epochs
    without improvement.

    Relies on module-level globals: dataset, device, epochs, batch_size,
    plot_num, dataset_name, loss_function, and the metric helpers.
    """
    hidden_size = args["hidden_size"] if "hidden_size" in args.keys() else 100
    attention_dropout = args["attention_dropout"] if "attention_dropout" in args.keys() else 0.2
    dropout = args["dropout"] if "dropout" in args.keys() else 0.5
    lr = args["lr"] if "lr" in args.keys() else 5e-4
    sa_layer_num = args["sa_layer_num"] if "sa_layer_num" in args.keys() else 1
    amsgrad = args["amsgrad"] if "amsgrad" in args.keys() else True
    session_length = args["session_length"] if "session_length" in args.keys() else 200
    head_num = args["head_num"] if "head_num" in args.keys() else 1
    model = BERT4Rec(hidden_size=hidden_size, itemNum=dataset.index_count, posNum=session_length, padding_idx=0,
                     dropout=dropout,
                     activate="selu", attention_dropout=attention_dropout, head_num=head_num,
                     sa_layer_num=sa_layer_num).to(device)
    # FIX: the amsgrad hyper-parameter was read from args but never forwarded
    # to the optimizer.
    opti = torch.optim.Adam(model.parameters(), lr=lr, amsgrad=amsgrad)
    patience = args["patience"] if "patience" in args.keys() else 5
    predict_nums = [1, 5, 10, 20]
    # FIX: initialise the best metrics as lists so the return types are
    # consistent (and best_model_ndcg is never unbound) even if no epoch
    # ever improves on the initial score.
    best_model_hr = [0.0 for _ in predict_nums]
    best_model_mrr = [0.0 for _ in predict_nums]
    best_model_ndcg = [0.0 for _ in predict_nums]
    best_r1m = 0.0
    best_model = None
    no_improvement_epoch = 0
    start_train_time = datetime.datetime.now()
    for epoch in range(epochs):
        batch_losses = []
        epoch_losses = []
        model.train()
        for i, batch_data in enumerate(dataset.get_batch(batch_size, session_length, phase="train")):
            # Append the [MASK] token (index itemNum + 1) at the end of every
            # session; the model predicts the item at that position.
            mask_item = torch.ones_like(torch.tensor(batch_data[1])) * dataset.index_count + 1
            sessions = torch.cat([torch.tensor(batch_data[0]), mask_item], dim=-1)
            # Targets shift by -1 because logits cover real items only
            # (embedding rows 1..itemNum map to logit indices 0..itemNum-1).
            target_items = torch.tensor(batch_data[1]).squeeze().to(device) - 1
            result_pos = model(sessions.to(device))
            loss = loss_function(result_pos, target_items)
            opti.zero_grad()
            loss.backward()
            opti.step()
            batch_losses.append(loss.cpu().detach().numpy())
            epoch_losses.append(loss.cpu().detach().numpy())
            if i % plot_num == 0:
                time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                print("[%s] [%d/%d] %d mean_batch_loss : %0.6f" % (time, epoch + 1, epochs, i, np.mean(batch_losses)))
                batch_losses = []

        model.eval()
        with torch.no_grad():
            start_test_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            print("Start predicting", start_test_time)
            rrs = [0 for _ in range(len(predict_nums))]
            hit_nums = [0 for _ in range(len(predict_nums))]
            ndcgs = [0 for _ in range(len(predict_nums))]
            seq_save = []
            label_save = []
            pre_save = []
            for i, batch_data in enumerate(dataset.get_batch(batch_size, session_length, phase="test")):
                mask_item = torch.ones_like(torch.tensor(batch_data[1])) * dataset.index_count + 1
                sessions = torch.cat([torch.tensor(batch_data[0]), mask_item], dim=-1).to(device)

                target_items = np.array(batch_data[1]) - 1
                y_pred = model.predict_top_k(sessions, 20).cpu().numpy()

                # top-k item ID number
                pre_k = y_pred + 1
                seq_temp = sessions.tolist()
                seq_save += seq_temp
                label_save += np.array(batch_data[1]).flatten().tolist()
                pre_save += pre_k.tolist()

                for j, predict_num in enumerate(predict_nums):
                    hit_nums[j] += get_hit_num(y_pred[:, :predict_num], target_items)
                    rrs[j] += get_rr(y_pred[:, :predict_num], target_items)
                    ndcgs[j] += get_ndcg(y_pred[:, :predict_num], target_items)

            end_test_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

            hrs = [hit_num / len(dataset.all_testing_data) for hit_num in hit_nums]
            mrrs = [rr / len(dataset.all_testing_data) for rr in rrs]
            mndcgs = [ndcg / len(dataset.all_testing_data) for ndcg in ndcgs]
            # Model selection criterion: HR@20 + MRR@20.
            if hrs[-1] + mrrs[-1] > best_r1m:
                best_model = deepcopy(model)
                best_model_hr = hrs
                best_model_mrr = mrrs
                best_model_ndcg = mndcgs
                best_r1m = hrs[-1] + mrrs[-1]
                no_improvement_epoch = 0
                # Persist the predictions of the best model seen so far.
                pre_save_path = "./BERTHealth/" + dataset_name + "/prediction_health.txt"
                res_pre = (seq_save, label_save, pre_save)
                pickle.dump(res_pre, open(pre_save_path, 'wb'))
            else:
                no_improvement_epoch += 1
            print("testing finish [%s] " % end_test_time)
            for k, predict_num in enumerate(predict_nums):
                print("\tHR@%d=%.5f MRR@%d=%.5f NDCG@%d=%.5f" % (
                    predict_num, hrs[k], predict_num, mrrs[k], predict_num, mndcgs[k]))
            if no_improvement_epoch >= patience:
                print("early stopping")
                break
    end_train_time = datetime.datetime.now()

    print("training and testting over, Total time", end_train_time - start_train_time)
    return best_model, best_model_hr, best_model_mrr, best_model_ndcg
+
688
# ---------------------------------------------------------------------------
# Script entry: load a dataset and run a (currently single-point) grid search
# over the BERT4Rec hyper-parameters, printing the best metrics per config.
# ---------------------------------------------------------------------------
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# TN/QB/NERD
dataset_name = 'TN'
train_path = './BERTHealth/' + dataset_name + '/train.txt'
test_path = './BERTHealth/' + dataset_name + '/test.txt'
dataset = SessionDataSet(train_file=train_path, test_file=test_path)
# torch.cuda.set_device(1)
# Overrides the module-level default; train() reads this global.
epochs = 50
# The trailing comments record the tuned values per dataset ("rr"/"dn").
hidden_sizes = [100]  # rr:100 dn:100
dropouts = [0.3]  # rr:0.3 dn:0.3
attention_dropouts = [0]  # rr:0 dn:0
lrs = [1e-3]  # rr:1e-3 dn:5e-4
session_lengths = [50]  # rr:50 dn:50
sa_layer_nums = [4]  # rr:4 dn:4
patience = 10
head_nums = [2]  # rr:2 dn:4, 32 dimension each head
amsgrads = [True]
best_params = ""
best_all_model = 0.0
best_all_hr = 0.0
best_all_mrr = 0.0
best_all_r1m = 0.0
print('datasets: ', dataset_name)
# hidden_sizes and head_nums are zipped (paired), not crossed.
for session_length in session_lengths:
    for hidden_size, head_num in zip(hidden_sizes, head_nums):
        for amsgrad in amsgrads:
            for attention_dropout in attention_dropouts:
                for dropout in dropouts:
                    for lr in lrs:
                        for sa_layer_num in sa_layer_nums:
                            # for head_num in head_nums:
                            args = {}
                            print(
                                "current model hyper-parameters: session_length=%d, hidden_size=%d, lr=%.4f,head_num=%d, amsgrad=%s, attention_dropout=%.2f, dropout=%.2f, sa_layer_num=%d. \n" % (
                                    session_length, hidden_size, lr, head_num, str(amsgrad), attention_dropout,
                                    dropout,
                                    sa_layer_num))
                            args["session_length"] = session_length
                            args["hidden_size"] = hidden_size
                            args["amsgrad"] = amsgrad
                            args["attention_dropout"] = attention_dropout
                            args["dropout"] = dropout
                            args["sa_layer_num"] = sa_layer_num
                            args["lr"] = lr
                            args["head_num"] = head_num
                            args["patience"] = patience
                            best_model, best_model_hr, best_model_mrr, best_model_ndcg = train(args)
                            # Best-over-all-configs tracking (disabled).
                            # if best_model_hr + best_model_mrr > best_all_r1m:
                            #     print("best model change")
                            #     best_all_r1m = best_model_hr + best_model_mrr
                            #     best_all_hr = best_model_hr
                            #     best_all_mrr = best_model_mrr
                            #     best_all_model = best_model
                            #     best_params = "session_length-%d, hidden_size-%d, lr-%.4f,head_num=%d, amsgrad-%s, attention_dropout-%.2f, dropout-%.2f, sa_layer_num-%d" % (
                            #         session_length, hidden_size, lr, head_num, str(amsgrad), attention_dropout,
                            #         dropout,
                            #         sa_layer_num)
                            # best_model = None
                            # print(
                            #     "current model hyper-parameters: session_length=%d, hidden_size=%d, lr=%.4f,head_num=%d, amsgrad=%s, attention_dropout=%.2f, dropout=%.2f, sa_layer_num=%d. \n" % (
                            #         session_length, hidden_size, lr, head_num, str(amsgrad), attention_dropout,
                            #         dropout,
                            #         sa_layer_num))
                            print("current model hyper-parameters: session_length=%d, hidden_size=%d, head_num=%d, sa_layer_num=%d. \n" % (session_length, hidden_size, head_num, sa_layer_num))
                            # print("current model HR@20=%.5f MRR@20=%.5f." % (best_model_hr, best_model_mrr))
                            # Metrics table: P = HR (precision@k here), M = MRR, N = NDCG.
                            print('P@1\tP@5\tM@5\tN@5\tP@10\tM@10\tN@10\tP@20\tM@20\tN@20\t')
                            print("%.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f" % (
                                best_model_hr[0]*100, best_model_hr[1]*100, best_model_mrr[1]*100, best_model_ndcg[1]*100, best_model_hr[2]*100, best_model_mrr[2]*100, best_model_ndcg[2]*100, best_model_hr[3]*100, best_model_mrr[3]*100, best_model_ndcg[3]*100))


# CKLLM: save the trained item embeddings (index 0 is padding).
# save_path = './BERTCKLLM/' + dataset_name + '/item_embs.pth'
# torch.save(best_model.item_embedding.state_dict(), save_path)
# print("The best result HR@20=%.5f MRR@20=%.5f, hyper-parameters: %s. " % (best_all_hr, best_all_mrr, best_params))
print(dataset_name)
print("over.")
HealthRec/HealthRec_code/BERT4Rec/Bert4RecHealth.py ADDED
@@ -0,0 +1,932 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+ import torch.nn as nn
4
+ from torch.optim.optimizer import Optimizer
5
+ import math
6
+ import random
7
+ import numpy as np
8
+ import pandas as pd
9
+ from torch.utils.data import Dataset
10
+ import tqdm
11
+ from matplotlib import pyplot as plt
12
+ import torch.backends.cudnn as cudnn
13
+ from copy import deepcopy
14
+ import os
15
+ import datetime
16
+ import pickle
17
+
18
+
19
+
20
# Optional deterministic seeding (disabled).
# seed = 1
# random.seed(seed)
# torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
# np.random.seed(seed)
# Deterministic cuDNN kernels; note that without the seeding above runs are
# still not fully reproducible.
cudnn.deterministic = True
cudnn.benchmark = False
# Global device used throughout the file.
device = torch.device("cuda")
# device = torch.device("cpu")

# Module-level defaults.
# NOTE(review): these look like they may be overridden by a driver script
# later in the file — confirm before tuning here.
session_length = 20
batch_size = 512 #512
plot_num = 5000  # print mean training loss every `plot_num` batches
epochs = 30
35
+
36
class SessionData(object):
    """One user session: an ordered list of item indexes plus its ids."""

    def __init__(self, session_index, session_id, items_indexes):
        self.session_index = session_index
        self.session_id = session_id
        self.item_list = items_indexes

    def generate_seq_datas(self, session_length, padding_idx=0, predict_length=1):
        """Expand the session into left-padded (history, target) rows.

        Each returned row has ``session_length`` history positions followed
        by the single target item. Only ``predict_length == 1`` is
        implemented; other values yield an empty list.

        Returns ``(session_index, sessions)``.
        """
        sessions = []
        if len(self.item_list) < 2:
            # FIX: the original called `append` with subscription syntax
            # (`append[...]`), which raised TypeError for single-item
            # sessions. Duplicate the lone item so one training row exists.
            self.item_list.append(self.item_list[0])
        if predict_length == 1:
            for i in range(len(self.item_list) - 1):
                if i < session_length:
                    # Left-pad short histories with the padding index.
                    train_data = [padding_idx for _ in range(session_length - i - 1)]
                    train_data.extend(self.item_list[:i + 1])
                    train_data.append(self.item_list[i + 1])
                else:
                    # Sliding window over the most recent `session_length` items.
                    train_data = self.item_list[i + 1 - session_length:i + 1]
                    train_data.append(self.item_list[i + 1])
                sessions.append(train_data)
        else:
            pass
        return self.session_index, sessions

    def __str__(self):
        info = " session index = {}\n session id = {} \n the length of item list= {} \n the fisrt item index in item list is {}".format(
            self.session_index, self.session_id, len(self.item_list), self.item_list[0])
        return info
64
+
65
+
66
class SessionDataSet(object):
    """Loads train/test session files and serves padded mini-batches.

    The pickle files are expected to hold a 3-tuple
    ``(session_ids, session_item_lists, session_labels)`` (see load_data).
    Item ids are re-mapped to dense indexes 1..index_count; index 0 is
    reserved for padding.
    """

    def __init__(self, train_file, test_file, padding_idx=0):
        super(SessionDataSet, self).__init__()
        self.index_count = 0          # number of distinct items seen so far
        self.session_count = 0        # running session counter across splits
        self.train_count = 0
        self.test_count = 0
        self.max_session_length = 0

        self.padding_idx = padding_idx
        self.item2index = dict()
        self.index2item = dict()
        self.session2index = dict()
        self.index2session = dict()
        self.item_total_num = dict()  # per-item occurrence counts
        self.item2index["<pad>"] = padding_idx
        self.index2item[padding_idx] = "<pad>"
        self.train_data = self.load_data(train_file)
        print("training set is loaded, # index: ", len(self.item2index.keys()))
        self.train_count = self.session_count
        print("train_session_num", self.train_count)
        # NOTE(review): test items not seen in training also extend the item
        # vocabulary here — confirm this is intended.
        self.test_data = self.load_data(test_file)
        print("testing set is loaded, # index: ", len(self.index2item.keys()))
        print("# item", self.index_count)
        self.test_count = self.session_count - self.train_count
        print("# test session:", self.test_count)
        # Caches filled lazily by get_all_training_data / get_all_testing_data.
        self.all_training_data = []
        self.all_testing_data = []
        self.all_meta_training_data = []
        self.all_meta_testing_data = []
        self.train_session_length = 0
        self.test_session_length = 0

    def load_data(self, file_path):
        """Parse one pickle file into a list of SessionData objects.

        Consecutive rows with the same session id are treated as one session;
        each row contributes its item list plus its label item.
        NOTE(review): the file handle opened for pickle.load is never closed.
        """
        data = pickle.load(open(file_path, 'rb'))
        session_ids = data[0]
        session_data = data[1]
        session_label = data[2]

        result_data = []
        lenth = len(session_ids)
        print("# session", lenth)

        last_session_id = session_ids[0]

        session_item_indexes = []

        # Seed the index maps from the first row (the main loop below only
        # re-reads items when the session id changes).
        for item_id in session_data[0]:
            if item_id not in self.item2index.keys():
                self.index_count += 1
                self.item2index[item_id] = self.index_count
                self.index2item[self.index_count] = item_id
                self.item_total_num[self.index_count] = 0
            session_item_indexes.append(self.item2index[item_id])
            self.item_total_num[self.item2index[item_id]] += 1
        target_item = session_label[0]
        if target_item not in self.item2index.keys():
            self.index_count += 1
            self.item2index[target_item] = self.index_count
            self.index2item[self.index_count] = target_item
            self.item_total_num[self.index_count] = 0
        session_item_indexes.append(self.item2index[target_item])
        self.item_total_num[self.item2index[target_item]] += 1

        for session_id, items, target_item in zip(session_ids, session_data, session_label):
            if session_id != last_session_id:
                # Close out the previous session...
                self.session_count += 1
                self.session2index[last_session_id] = self.session_count
                self.index2session[self.session_count] = last_session_id
                if len(session_item_indexes) > self.max_session_length:
                    self.max_session_length = len(session_item_indexes)
                new_session = SessionData(self.session_count, last_session_id, session_item_indexes)
                result_data.append(new_session)
                last_session_id = session_id
                session_item_indexes = []
                # ...and start indexing the new one.
                for item_id in items:
                    if item_id not in self.item2index.keys():
                        self.index_count += 1
                        self.item2index[item_id] = self.index_count
                        self.index2item[self.index_count] = item_id
                        self.item_total_num[self.index_count] = 0
                    session_item_indexes.append(self.item2index[item_id])
                    self.item_total_num[self.item2index[item_id]] += 1
                if target_item not in self.item2index.keys():
                    self.index_count += 1
                    self.item2index[target_item] = self.index_count
                    self.index2item[self.index_count] = target_item
                    self.item_total_num[self.index_count] = 0
                session_item_indexes.append(self.item2index[target_item])
                self.item_total_num[self.item2index[target_item]] += 1
            else:
                continue

        # Flush the final session.
        self.session_count += 1
        self.session2index[last_session_id] = self.session_count
        new_session = SessionData(self.session_count, last_session_id, session_item_indexes)
        result_data.append(new_session)
        print("loaded")
        print(new_session)

        return result_data

    def get_batch(self, batch_size, session_length=10, predict_length=1, all_data=None, phase="train", neg_num=1,
                  sampling_mathod="random"):
        """Yield mini-batches; train batches are shuffled and get negative
        samples appended, test batches are split into (history, target)."""
        if phase == "train":
            if all_data is None:
                all_data = self.get_all_training_data(session_length)
            indexes = np.random.permutation(all_data.shape[0])
            all_data = all_data[indexes]
        else:
            if all_data is None:
                all_data = self.get_all_testing_data(session_length)

        sindex = 0
        eindex = batch_size
        while eindex < all_data.shape[0]:
            batch = all_data[sindex: eindex]

            temp = eindex
            eindex = eindex + batch_size
            sindex = temp
            if phase == "train":
                batch = self.divid_and_extend_negative_samples(batch, session_length=session_length,
                                                               predict_length=predict_length, neg_num=neg_num,
                                                               method=sampling_mathod)
            else:
                batch = [batch[:, :session_length], batch[:, session_length:]]
            yield batch

        # Final (possibly partial) batch.
        if eindex >= all_data.shape[0]:
            batch = all_data[sindex:]
            if phase == "train":
                batch = self.divid_and_extend_negative_samples(batch, session_length=session_length,
                                                               predict_length=predict_length, neg_num=neg_num,
                                                               method=sampling_mathod)
            else:
                batch = [batch[:, :session_length], batch[:, session_length:]]
            yield batch

    def get_batch_with_neg(self, batch_size, session_length=10, predict_length=1, all_data=None, phase="train",
                           neg_num=1, sampling_mathod="random"):
        """Like get_batch, but expects rows that already carry `neg_num`
        negatives at the end.

        NOTE(review): get_all_training_data_with_neg / get_all_testing_data_with_neg
        are not defined in this excerpt — confirm they exist elsewhere.
        """
        if phase == "train":
            all_data = self.get_all_training_data_with_neg(session_length, neg_num)
            indexes = np.random.permutation(all_data.shape[0])
            all_data = all_data[indexes]
        else:
            all_data = self.get_all_testing_data_with_neg(session_length, neg_num)

        sindex = 0
        eindex = batch_size
        while eindex < all_data.shape[0]:
            batch = all_data[sindex: eindex]

            temp = eindex
            eindex = eindex + batch_size
            sindex = temp
            if phase == "train":
                batch = [batch[:, :session_length], batch[:, session_length:session_length + predict_length],
                         batch[:, -neg_num:]]
            else:
                batch = [batch[:, :session_length], batch[:, session_length:]]
            yield batch

        if eindex >= all_data.shape[0]:
            batch = all_data[sindex:]
            if phase == "train":
                batch = [batch[:, :session_length], batch[:, session_length:session_length + predict_length],
                         batch[:, -neg_num:]]
            else:
                batch = [batch[:, :session_length], batch[:, session_length:]]
            yield batch

    def get_batch_tasks_with_neg(self, batch_size, session_length=10, predict_length=1, all_data=None, phase="train",
                                 neg_num=1, sampling_mathod="random"):
        """Meta-learning variant: yields [sessions, targets, negatives]
        per task.

        NOTE(review): get_all_meta_training_data_with_neg /
        get_all_meta_testing_data_with_neg are not defined in this excerpt.
        """
        if phase == "train":
            all_data = self.get_all_meta_training_data_with_neg(session_length, neg_num)
            random.shuffle(all_data)
        else:
            all_data = self.get_all_meta_testing_data_with_neg(session_length, neg_num)
        sindex = 0
        eindex = batch_size
        while eindex < len(all_data):
            batch = all_data[sindex: eindex]

            temp = eindex
            eindex = eindex + batch_size
            sindex = temp

            session_items = [batch[i][:, :session_length] for i in range(len(batch))]

            target_item = [batch[i][:, session_length:session_length + predict_length] for i in range(len(batch))]

            neg_item = [batch[i][:, -neg_num:] for i in range(len(batch))]
            batch = [session_items, target_item, neg_item]
            yield batch

        if eindex >= len(all_data):
            batch = all_data[sindex:]
            session_items = [batch[i][:, :session_length] for i in range(len(batch))]

            target_item = [batch[i][:, session_length:session_length + predict_length] for i in range(len(batch))]

            neg_item = [batch[i][:, -neg_num:] for i in range(len(batch))]
            batch = [session_items, target_item, neg_item]
            yield batch

    def divid_and_extend_negative_samples(self, batch_data, session_length, predict_length=1, neg_num=1,
                                          method="random"):
        """
        divid and extend negative samples

        Splits each row into (history, target) and draws ``neg_num`` random
        negative item indexes not present in the row.
        NOTE(review): the non-"random" branch is unfinished — it sorts a
        popularity list but never fills neg_items.
        """
        neg_items = []
        if method == "random":
            for session_and_target in batch_data:
                neg_item = []
                for i in range(neg_num):
                    # Rejection-sample until the item is neither in the
                    # session nor already drawn.
                    rand_item = random.randint(1, self.index_count)
                    while rand_item in session_and_target or rand_item in neg_item:
                        rand_item = random.randint(1, self.index_count)
                    neg_item.append(rand_item)
                neg_items.append(neg_item)
        else:

            total_list = set()
            for session in batch_data:
                for i in session:
                    total_list.add(i)
            total_list = list(total_list)
            total_list = sorted(total_list, key=lambda item: self.item_total_num[item], reverse=True)
            for i, session in enumerate(batch_data):
                np.random.choice(total_list)
        session_items = batch_data[:, :session_length]
        target_item = batch_data[:, session_length:]
        neg_items = np.array(neg_items)
        return [session_items, target_item, neg_items]

    def get_all_training_data(self, session_length, predict_length=1):
        """Build (and cache) the padded training matrix of shape
        (num_rows, session_length + 1)."""
        if len(self.all_training_data) != 0 and self.train_session_length == session_length:
            return self.all_training_data
        print("Start building the all training dataset")
        all_sessions = []
        for session_data in self.train_data:
            session_index, sessions = session_data.generate_seq_datas(session_length, padding_idx=self.padding_idx)
            if sessions is not None:
                all_sessions.extend(sessions)
        all_sessions = np.array(all_sessions)
        self.all_training_data = all_sessions
        self.train_session_length = session_length
        print("The total number of training samples is", all_sessions.shape)
        return all_sessions

    def get_all_testing_data(self, session_length, predict_length=1):
        """Build (and cache) the padded testing matrix (same layout as
        get_all_training_data)."""
        if len(self.all_testing_data) != 0 and self.test_session_length == session_length:
            return self.all_testing_data
        all_sessions = []
        for session_data in self.test_data:
            session_index, sessions = session_data.generate_seq_datas(session_length, padding_idx=self.padding_idx)
            if sessions is not None:
                all_sessions.extend(sessions)
        all_sessions = np.array(all_sessions)
        self.all_testing_data = all_sessions
        self.test_session_length = session_length
        print("The total number of testing samples is", all_sessions.shape)
        return all_sessions

    def __getitem__(self, idx):
        # Unimplemented Dataset-protocol stub.
        pass

    def __len__(self):
        # Unimplemented Dataset-protocol stub.
        pass
338
+
339
+
340
def bpr_loss(r):
    """BPR loss: sum of -log(sigmoid(r)) over all score differences ``r``."""
    return -torch.log(torch.sigmoid(r)).sum()
342
+
343
+
344
def get_hit_num(pred, y_truth):
    """Count how many predicted entries match any ground-truth item.

    pred: numpy array (batch_size, k) of predicted indexes.
    y_truth: per-row iterables of ground-truth indexes.
    """
    total = 0
    for predicted_row, truths in zip(pred, y_truth):
        for truth in truths:
            total += np.sum(predicted_row == truth)
    return total
355
+
356
+
357
def get_rr(pred, y_truth):
    """Sum of reciprocal ranks of every ground-truth hit (1-based ranks)."""
    total = 0.0
    for predicted_row, truths in zip(pred, y_truth):
        for truth in truths:
            positions = np.flatnonzero(predicted_row == truth)
            for pos in positions:
                total += 1 / (pos + 1)
    return total
365
+
366
+
367
def get_dcg(pred, y_truth):
    """Per-row DCG where only the FIRST ground-truth item of each row counts
    as relevant (gain 1); returns a 1-D array of per-row DCG values."""
    relevance = np.zeros_like(pred)
    for i, truths in enumerate(y_truth):
        target = truths[0]
        for j in range(pred.shape[1]):
            if pred[i][j] == target:
                relevance[i][j] = 1
    gain = 2 ** relevance - 1
    discounts = np.tile(np.log2(np.arange(pred.shape[1]) + 2), (len(y_truth), 1))
    return np.sum(gain / discounts, axis=1)
379
+
380
+
381
def get_ndcg(pred, y_truth):
    """Sum over rows of DCG/IDCG.

    The ideal ranking is simulated by putting each row's ground-truth item
    first and padding the remaining columns with -1 sentinels, which can
    never match a real item index.
    """
    actual = get_dcg(pred, y_truth)
    filler = np.zeros_like(pred)[:, :-1] - 1
    ideal = get_dcg(np.concatenate((y_truth, filler), axis=1), y_truth)
    return np.sum(actual / ideal)
387
+
388
+
389
def dcg_score(y_pre, y_true, k):
    """Discounted cumulative gain of the first *k* predictions.

    Each of the first k entries of ``y_pre`` contributes gain 1 when it
    appears anywhere in ``y_true``; position i is discounted by log2(i + 2).
    Returns a numpy scalar.
    """
    relevance = np.zeros(k)
    for rank, candidate in enumerate(y_pre[:k]):
        if candidate in y_true:
            relevance[rank] = 1
    gains = 2 ** relevance - 1
    position_discounts = np.log2(np.arange(k) + 2)
    return np.sum(gains / position_discounts)
400
+
401
+
402
def ndcg_score(y_pre, y_true, k=5):
    """Normalized DCG@k of predictions ``y_pre`` against ground truth ``y_true``.

    The ideal DCG is the DCG of the ground-truth list ranked against itself.
    FIX: returns 0.0 when the ideal DCG is zero (e.g. empty ground truth)
    instead of dividing by zero.
    """
    dcg = dcg_score(y_pre, y_true, k)
    idcg = dcg_score(y_true, y_true, k)
    if idcg == 0:
        return 0.0
    return dcg / idcg
406
+
407
+
408
# Multi-class cross-entropy over the full item vocabulary: BERT4Rec is
# trained with a softmax over all items rather than sampled negatives.
loss_function = torch.nn.CrossEntropyLoss()
409
+
410
+
411
class MultiHeadSelfAttention(torch.nn.Module):
    """Multi-head self-attention with a final linear merge of the heads.

    Heads are formed by chunking the hidden dimension (``chunk``) rather than
    by reshape/permute; ``concat_weight`` projects the re-concatenated heads
    back to ``hidden_size``. Relies on the module-level global ``device``.
    """

    def __init__(self, hidden_size, activate="relu", head_num=2, dropout=0, initializer_range=0.02):
        # NOTE(review): ``activate`` is accepted for signature compatibility
        # but never used in this class.
        super(MultiHeadSelfAttention, self).__init__()
        self.config = list()

        self.hidden_size = hidden_size

        self.head_num = head_num
        # hidden_size must be divisible by head_num so heads split evenly.
        if (self.hidden_size) % head_num != 0:
            raise ValueError(self.head_num, "error")
        self.head_dim = self.hidden_size // self.head_num

        # Q/K/V projections plus the output merge (merge has no bias).
        self.query = torch.nn.Linear(self.hidden_size, self.hidden_size)
        self.key = torch.nn.Linear(self.hidden_size, self.hidden_size)
        self.value = torch.nn.Linear(self.hidden_size, self.hidden_size)
        self.concat_weight = torch.nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        torch.nn.init.normal_(self.query.weight, 0, initializer_range)
        torch.nn.init.normal_(self.key.weight, 0, initializer_range)
        torch.nn.init.normal_(self.value.weight, 0, initializer_range)
        torch.nn.init.normal_(self.concat_weight.weight, 0, initializer_range)
        self.dropout = torch.nn.Dropout(dropout)

    def dot_score(self, encoder_output):
        """Scaled dot-product attention logits for every head."""
        query = self.dropout(self.query(encoder_output))
        key = self.dropout(self.key(encoder_output))
        # head_num * batch_size * session_length * head_dim
        querys = torch.stack(query.chunk(self.head_num, -1), 0)
        keys = torch.stack(key.chunk(self.head_num, -1), 0)
        # head_num * batch_size * session_length * session_length
        dots = querys.matmul(keys.permute(0, 1, 3, 2)) / torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float))
        # print(len(dots),dots[0].shape)
        return dots

    def forward(self, encoder_outputs, mask=None):
        """Run multi-head attention over ``encoder_outputs``.

        ``mask``: boolean/0-1 tensor with 1 (True) at PADDING positions
        (the caller passes ``session == padding_idx``). When given, padded
        positions are excluded from attention.
        """
        attention_energies = self.dot_score(encoder_outputs)
        value = self.dropout(self.value(encoder_outputs))

        values = torch.stack(value.chunk(self.head_num, -1))

        if mask is not None:
            eye = torch.eye(mask.shape[-1]).to(device)
            # new_mask is 1 wherever either position of a (query, key) pair is
            # padding, PLUS the diagonal — so a position also never attends to
            # itself. NOTE(review): the diagonal masking looks deliberate
            # (Cloze-style), but confirm it is intended.
            new_mask = torch.clamp_max((1 - (1 - mask.float()).unsqueeze(1).permute(0, 2, 1).bmm(
                (1 - mask.float()).unsqueeze(1))) + eye, 1)
            # Push masked logits to -inf before softmax, then zero residual mass.
            attention_energies = attention_energies - new_mask * 1e12
            weights = F.softmax(attention_energies, dim=-1)
            weights = weights * (1 - new_mask)
        else:
            weights = F.softmax(attention_energies, dim=2)

        # head_num * batch_size * session_length * head_dim
        outputs = weights.matmul(values)
        # batch_size * session_length * hidden_size
        outputs = torch.cat([outputs[i] for i in range(outputs.shape[0])], dim=-1)
        outputs = self.dropout(self.concat_weight(outputs))

        return outputs
467
+
468
+
469
class PositionWiseFeedForward(torch.nn.Module):
    """Two-layer position-wise FFN with GELU: hidden -> 4*hidden -> hidden."""

    def __init__(self, hidden_size, initializer_range=0.02):
        super(PositionWiseFeedForward, self).__init__()
        # Expansion then projection; weights re-drawn from N(0, initializer_range).
        self.final1 = torch.nn.Linear(hidden_size, hidden_size * 4, bias=True)
        self.final2 = torch.nn.Linear(hidden_size * 4, hidden_size, bias=True)
        for linear in (self.final1, self.final2):
            torch.nn.init.normal_(linear.weight, 0, initializer_range)

    def forward(self, x):
        """Apply the expansion, GELU non-linearity, and projection."""
        return self.final2(F.gelu(self.final1(x)))
481
+
482
+
483
class TransformerLayer(torch.nn.Module):
    """Post-norm transformer block: multi-head self-attention followed by a
    position-wise feed-forward network, each wrapped in residual + LayerNorm.

    NOTE(review): a single LayerNorm instance is shared by both sub-layers;
    standard transformer blocks use two independent norms — confirm intended.
    """

    def __init__(self, hidden_size, activate="relu", head_num=2, dropout=0, attention_dropout=0,
                 initializer_range=0.02):
        super(TransformerLayer, self).__init__()
        self.mh = MultiHeadSelfAttention(hidden_size=hidden_size, activate=activate, head_num=head_num,
                                         dropout=attention_dropout, initializer_range=initializer_range)
        self.pffn = PositionWiseFeedForward(hidden_size, initializer_range=initializer_range)
        self.layer_norm = torch.nn.LayerNorm(hidden_size)
        # Fix: the original assigned self.dropout twice (once before self.mh and
        # once after self.layer_norm); the second silently replaced the first.
        # A single assignment has identical behavior (Dropout is stateless).
        self.dropout = torch.nn.Dropout(dropout)

    def forward(self, encoder_outputs, mask=None):
        """Attention sub-layer then FFN sub-layer, each as residual + norm.

        ``mask`` (1 at padding positions) is forwarded to the attention.
        """
        encoder_outputs = self.layer_norm(encoder_outputs + self.dropout(self.mh(encoder_outputs, mask)))
        encoder_outputs = self.layer_norm(encoder_outputs + self.dropout(self.pffn(encoder_outputs)))
        return encoder_outputs
498
+
499
+
500
class BERT(torch.nn.Module):
    """A stack of ``sa_layer_num`` TransformerLayer blocks applied in order.

    NOTE(review): ``self.session_length`` is read from the module-level
    global ``session_length``, not from a constructor argument; ``itemNum``,
    ``posNum``, ``padding_idx``, ``activate`` and ``initializer_range`` are
    accepted but only partially used.
    """

    def __init__(self, hidden_size=100, itemNum=0, posNum=0, padding_idx=0, dropout=0.5, attention_dropout=0,
                 head_num=2, sa_layer_num=1,
                 activate="relu", initializer_range=0.02):
        super(BERT, self).__init__()
        self.hidden_size = hidden_size
        self.head_num = head_num
        self.session_length = session_length
        self.sa_layer_num = sa_layer_num
        layers = [
            TransformerLayer(hidden_size, head_num=head_num, dropout=dropout,
                             attention_dropout=attention_dropout,
                             initializer_range=initializer_range)
            for _ in range(sa_layer_num)
        ]
        self.transformers = torch.nn.ModuleList(layers)

    def forward(self, compute_output, attention_mask):
        """Feed the representation through every transformer block in order."""
        for layer in self.transformers:
            compute_output = layer(compute_output, attention_mask)
        return compute_output
518
+
519
+
520
class BERT4Rec(torch.nn.Module):
    """BERT4Rec sequential recommender augmented with health-aware scoring.

    ``forward`` returns three things: the next-item logits from the BERT
    encoder, a self-supervised "healthiness" contrastive loss, and a
    lambda-weighted health score over items. Item/position embeddings are
    initialised from precomputed ``.npy`` files under ./BERTHealth/<datasets>/.

    NOTE(review): this class reads module-level globals ``session_length``,
    ``device`` and ``hidden_size`` and hard-codes CUDA tensors
    (``torch.cuda.LongTensor``, device='cuda'), so it cannot run on CPU as-is.
    """

    def __init__(self, hidden_size=64, itemNum=0, posNum=0, padding_idx=0, dropout=0.5, attention_dropout=0, head_num=2,
                 sa_layer_num=1, datasets = "TN", h_lamdba=0.05,
                 activate="relu", initializer_range=0.02):
        super(BERT4Rec, self).__init__()
        self.padding_idx = padding_idx
        self.hidden_size = hidden_size
        self.head_num = head_num
        self.session_length = session_length
        self.sa_layer_num = sa_layer_num
        self.activate = torch.relu
        self.dropout = torch.nn.Dropout(dropout)

        # Vocabulary = items + padding (0) + [MASK] token.
        self.n_items = itemNum +2

        # Trade-off weight between user interest and content healthiness.
        self.h_lamdba = h_lamdba

        self.mask_index = torch.tensor(itemNum + 1).to(device)
        self.mask_position = torch.tensor(posNum + 1).to(device)
        self.item_embedding = torch.nn.Embedding(itemNum + 2, hidden_size, padding_idx=self.padding_idx)
        self.position_embedding = torch.nn.Embedding(posNum + 2, hidden_size, padding_idx=self.padding_idx)
        self.bert = BERT(hidden_size=hidden_size, dropout=dropout, attention_dropout=attention_dropout,
                         head_num=head_num, sa_layer_num=sa_layer_num,
                         activate=activate, initializer_range=initializer_range)

        # torch.nn.init.normal_(self.item_embedding.weight, 0, initializer_range)
        # torch.nn.init.constant_(self.item_embedding.weight[0], 0)

        # Item embeddings initialised from precomputed title-text vectors;
        # NERD's matrix is one row short, so a zero row is appended.
        text_emb_path = './BERTHealth/' + datasets + '/H_title_emb100.npy'
        textWeights = np.load(text_emb_path)
        if datasets == 'NERD':
            textWeights = np.vstack((textWeights, np.zeros(hidden_size)))
        self.item_embedding.weight.data.copy_(torch.from_numpy(textWeights))

        # Two-row (padding + concept) 768-d embeddings for the "healthy" /
        # "harmful" anchors, plus frozen per-item health-reason vectors.
        self.emb_healthy = nn.Embedding(2, 768, padding_idx=0)
        self.emb_harmful = nn.Embedding(2, 768, padding_idx=0)
        self.emb_reason = nn.Embedding(itemNum + 2, hidden_size, padding_idx=0)

        reason_emb_path = './BERTHealth/' + datasets + '/H_reason_emb100.npy'
        reasonWeights = np.load(reason_emb_path)
        if datasets == 'NERD':
            reasonWeights = np.vstack((reasonWeights, np.zeros(hidden_size)))
        self.emb_reason.weight.data.copy_(torch.from_numpy(reasonWeights))

        # Reason/health/harm embeddings stay frozen during training.
        self.emb_reason.weight.requires_grad = False

        health_emb_path = './BERTHealth/' + datasets + '/H_pos_emb100.npy'
        healWeights = np.load(health_emb_path)
        self.emb_healthy.weight.data.copy_(torch.from_numpy(healWeights))
        self.emb_healthy.weight.requires_grad = False

        harm_emb_path = './BERTHealth/' + datasets + '/H_neg_emb100.npy'
        harmWeights = np.load(harm_emb_path)
        self.emb_harmful.weight.data.copy_(torch.from_numpy(harmWeights))
        self.emb_harmful.weight.requires_grad = False

        # Project the 768-d text anchors down to the model's hidden size.
        self.dense_text_health = nn.Linear(768, hidden_size)
        self.dense_text_harm = nn.Linear(768, hidden_size)

        self.cos_sim = nn.CosineSimilarity(dim=-1)
        # Bilinear-style projections used by user_loss.
        self.ul_W1 = nn.Linear(hidden_size, hidden_size)
        self.ul_W2 = nn.Linear(hidden_size, hidden_size)
        self.ul_W3 = nn.Linear(hidden_size, hidden_size)

        # self_attention
        num_heads = 4
        if hidden_size % num_heads != 0:  # must divide evenly
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (hidden_size, num_heads))
        # parameter definitions
        self.num_heads = num_heads  # 4
        self.attention_head_size = int(hidden_size / self.num_heads)  # per-head dimension
        self.all_head_size = int(self.num_heads * self.attention_head_size)
        # linear maps producing query, key, value for the health attention
        self.query = nn.Linear(hidden_size, hidden_size)  # 128, 128
        self.key = nn.Linear(hidden_size, hidden_size)
        self.value = nn.Linear(hidden_size, hidden_size)
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        torch.nn.init.normal_(self.position_embedding.weight, 0, initializer_range)
        torch.nn.init.constant_(self.position_embedding.weight[0], 0)
        self.projection = torch.nn.Linear(hidden_size, hidden_size, bias=True)
        torch.nn.init.normal_(self.projection.weight, 0, initializer_range)
        # Per-item output bias over real items only (excludes padding + mask).
        self.output_bias = torch.nn.Parameter(torch.zeros(itemNum, ))
        self.layer_norm = torch.nn.LayerNorm(hidden_size)

    def transpose_for_scores(self, x, attention_head_size):
        """Split the last dim into heads: [bs, seqlen, hid] -> [bs, heads, seqlen, head_dim]."""
        # INPUT: x'shape = [bs, seqlen, hid_size], e.g. hid_size=128
        new_x_shape = x.size()[:-1] + (self.num_heads, attention_head_size)  # [bs, seqlen, heads, head_dim]
        x = x.view(*new_x_shape)  #
        return x.permute(0, 2, 1, 3)

    def user_loss(self, user_emb, health_emb, harm_emb):
        """Contrastive loss pulling the user representation toward the
        "healthy" anchor and away from the "harmful" anchor.

        NOTE(review): log10(exp(x)) is numerically unstable for large |x|;
        a logsumexp formulation would be safer — flagged, not changed here.
        """
        # health_sim = self.cos_sim(self.ul_W1(user_emb), self.ul_W2(health_emb))
        # harm_sim = self.cos_sim(self.ul_W1(user_emb), self.ul_W3(harm_emb))

        health_sim = self.ul_W1(user_emb) * self.ul_W2(health_emb)
        health_sim = torch.sum(health_sim, -1)
        harm_sim = self.ul_W1(user_emb) * self.ul_W3(harm_emb)
        harm_sim = torch.sum(harm_sim, -1)

        # health_sim = user_emb * health_emb
        # health_sim = torch.sum(health_sim, -1)
        # harm_sim = user_emb + harm_emb
        # harm_sim = torch.sum(harm_sim, -1)

        # InfoNCE-style: log p(healthy) over {healthy, harmful}; negated below.
        ssl_loss = torch.log10(torch.exp(health_sim)) - torch.log10(torch.exp(health_sim) + torch.exp(harm_sim))
        ssl_loss = torch.sum(ssl_loss, 0)
        return -ssl_loss

    def forward(self, session, mask_indexes=None):
        """Score the next item for each session.

        session: (batch, seq_len) padded item-id tensor (0 = padding; the
        caller appends the [MASK] id as the last position).
        Returns (scores, ssl_loss, weighted_health_scores).
        """
        # --- BERT recommendation branch ---
        mask = (session != 0).float()

        mask = mask.unsqueeze(2).repeat((1, 1, self.hidden_size))
        session_item_embeddings = self.item_embedding(session) * mask
        positions = torch.arange(0, session.shape[1]).unsqueeze(0).repeat((session.shape[0], 1)).to(device)
        session_position_embeddings = self.position_embedding(positions) * mask
        session_item_vecs = self.dropout(self.layer_norm(session_item_embeddings + session_position_embeddings))
        attention_mask = (session == self.padding_idx)
        if mask_indexes is not None:
            # Gather representations at explicitly-masked positions.
            compute_output = self.dropout(self.bert(session_item_vecs, attention_mask).gather(1, mask_indexes))
        else:
            # Default: use the last position (where the [MASK] token sits).
            compute_output = self.dropout(self.bert(session_item_vecs, attention_mask)[:, -1, :])
        compute_output = F.gelu(self.dropout(self.projection(compute_output)))
        # Logits over real items only: rows 1..itemNum of the tied embedding.
        scores = torch.matmul(compute_output, self.item_embedding.weight[1:-1].t()) + self.output_bias

        # --- health branch ---
        seq = session
        # Self-attention with the "healthy" anchor as query over reason embeddings.
        mask = torch.where(seq > 0, torch.tensor([1.], device=self.device),
                           torch.tensor([0.], device=self.device))
        mask_h = mask.float().unsqueeze(-1)
        attention_mask = mask_h.permute(0, 2, 1).unsqueeze(1)  # [bs, 1, 1, seqlen] add broadcast dims
        attention_mask = (1.0 - attention_mask) * -10000.0

        seq_h = seq
        item_f = self.emb_reason(seq_h)
        K_emb = item_f
        V_emb = item_f
        # Index 1 selects the "healthy" concept row for every position.
        all_health = torch.cuda.LongTensor(list(K_emb.shape)[0], list(K_emb.shape)[1]).fill_(1)
        Q_emb = self.emb_healthy(all_health)
        Q_emb = self.dense_text_health(Q_emb)

        mixed_query_layer = self.query(Q_emb)  # [bs, seqlen, hid_size]
        mixed_key_layer = self.key(K_emb)  # [bs, seqlen, hid_size]
        mixed_value_layer = self.value(V_emb)  # [bs, seqlen, hid_size]

        # NOTE(review): uses the module-level global ``hidden_size`` here, not
        # self.hidden_size — only correct when they agree; confirm.
        attention_head_size = int(hidden_size / self.num_heads)
        query_layer = self.transpose_for_scores(mixed_query_layer, attention_head_size)  # [bs, heads, seqlen, head_dim]
        key_layer = self.transpose_for_scores(mixed_key_layer, attention_head_size)
        value_layer = self.transpose_for_scores(mixed_value_layer, attention_head_size)  # [bs, heads, seqlen, head_dim]
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        # [bs, heads, seqlen, head_dim]*[bs, heads, head_dim, seqlen] ==> [bs, heads, seqlen, seqlen]
        attention_scores = attention_scores / math.sqrt(attention_head_size)  # [bs, heads, seqlen, seqlen]
        attention_scores = attention_scores + attention_mask
        attention_probs = nn.Softmax(dim=-1)(attention_scores)  # [bs, heads, seqlen, seqlen]
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # matmul: [bs, heads, seqlen, seqlen] * [bs, heads, seqlen, head_dim] = [bs, heads, seqlen, head_dim]
        context_layer = torch.matmul(attention_probs, value_layer)  # [bs, heads, seqlen, head_dim]
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()  # [bs, seqlen, heads, head_dim]
        new_context_layer_shape = context_layer.size()[:-2] + (hidden_size,)  # [bs, seqlen, hid_size]
        sa_result = context_layer.view(*new_context_layer_shape)
        # last hidden state: pick the representation at each session's last
        # non-padding position as the user's health representation.
        mask_h = mask.long().unsqueeze(-1)
        item_pos = torch.tensor(range(1, V_emb.size()[1] + 1), device='cuda')
        item_pos = item_pos.unsqueeze(0).expand_as(seq_h)
        item_pos = item_pos * mask_h.squeeze(2)
        item_last_num = torch.max(item_pos, 1)[0].unsqueeze(1).expand_as(item_pos)
        last_pos_t = torch.where(item_pos - item_last_num >= 0, torch.tensor([1.0], device='cuda'),
                                 torch.tensor([0.0], device='cuda'))
        as_last_unit = last_pos_t.unsqueeze(2).expand_as(sa_result) * sa_result
        user_h = torch.sum(as_last_unit, 1)

        # item_embs_health = self.emb_reason(torch.arange(self.n_items).to(self.device))
        # scores_health = torch.matmul(user_h, item_embs_health.permute(1, 0))
        # scores = scores_rec + self.h_lambda*scores_health

        item_embs_reason = self.emb_reason.weight[1:-1]

        item_merge = item_embs_reason

        user_health = torch.cuda.LongTensor(list(user_h.shape)[0]).fill_(1)
        user_health_emb = self.emb_healthy(user_health)
        user_health_emb = self.dense_text_health(user_health_emb)

        # User-conditioned and user-independent health scores over real items.
        scores_ui = torch.matmul(user_h, item_merge.permute(1, 0))
        scores_item = torch.matmul(user_health_emb, item_merge.permute(1, 0))
        # scores = scores_rec
        # scores = self.sf(scores)

        # ssl loss

        u_index = torch.cuda.LongTensor(list(user_h.shape)[0]).fill_(1)

        u_health_emb = self.emb_healthy(u_index)
        u_health_emb = self.dense_text_health(u_health_emb)

        u_harm_emb = self.emb_harmful(u_index)
        u_harm_emb = self.dense_text_harm(u_harm_emb)

        ssl_loss = self.user_loss(user_h, u_health_emb, u_harm_emb)


        return scores, ssl_loss, self.h_lamdba * (scores_item + scores_ui)

    def predict_top_k(self, session, k=20):
        """Return the top-k item indices after adding the weighted health score
        to the recommendation logits."""
        result, ssl_loss, health_loss = self.forward(session)
        result = torch.topk(result+health_loss, k, dim=1)[1]

        return result
736
+
737
+
738
+
739
+
740
+
741
def train(args):
    """Train and evaluate BERT4Rec with the hyper-parameters in ``args``.

    Uses module-level globals: ``dataset``, ``device``, ``epochs``,
    ``batch_size``, ``plot_num``, ``loss_function``, ``dataset_name``.
    Returns (best_model, best_hrs, best_mrrs, best_ndcgs), where the metric
    values are lists aligned with ``predict_nums`` = [1, 5, 10, 20].

    NOTE(review): if no epoch ever improves on best_r1m=0.0,
    ``best_model_ndcg`` is never assigned and the final return raises
    NameError. ``amsgrad`` is read from args but not passed to Adam.
    """
    hidden_size = args["hidden_size"] if "hidden_size" in args.keys() else 100
    attention_dropout = args["attention_dropout"] if "attention_dropout" in args.keys() else 0.2
    dropout = args["dropout"] if "dropout" in args.keys() else 0.5
    lr = args["lr"] if "lr" in args.keys() else 5e-4
    sa_layer_num = args["sa_layer_num"] if "sa_layer_num" in args.keys() else 1
    amsgrad = args["amsgrad"] if "amsgrad" in args.keys() else True
    session_length = args["session_length"] if "session_length" in args.keys() else 200
    head_num = args["head_num"] if "head_num" in args.keys() else 1
    datasets_name = args["datasets"] if "datasets" in args.keys() else "TN"
    h_lamdba = args["lamdba"] if "lamdba" in args.keys() else 0.05
    model = BERT4Rec(hidden_size=hidden_size, itemNum=dataset.index_count, posNum=session_length, padding_idx=0,
                     dropout=dropout,
                     activate="selu", attention_dropout=attention_dropout, head_num=head_num,
                     sa_layer_num=sa_layer_num, datasets=datasets_name, h_lamdba=h_lamdba).to(device)
    opti = torch.optim.Adam(model.parameters(), lr=lr)
    patience = args["patience"] if "patience" in args.keys() else 5
    best_model_hr = 0.0
    best_model_mrr = 0.0
    best_r1m = 0.0
    best_model = None
    predict_nums = [1, 5, 10, 20]
    no_improvement_epoch = 0
    start_train_time = datetime.datetime.now()
    for epoch in range(epochs):
        batch_losses = []
        epoch_losses = []
        model.train()
        for i, batch_data in enumerate(dataset.get_batch(batch_size, session_length, phase="train")):
            # Append the [MASK] token (id = index_count + 1) to every session.
            mask_item = torch.ones_like(torch.tensor(batch_data[1])) * dataset.index_count + 1
            sessions = torch.cat([torch.tensor(batch_data[0]), mask_item], dim=-1)
            # Targets shifted to 0-based class indices for CrossEntropyLoss.
            target_items = torch.tensor(batch_data[1]).squeeze().to(device) - 1
            result_pos, ssl_loss, health_loss = model(sessions.to(device))
            # Joint objective: next-item cross-entropy + health SSL loss.
            loss = loss_function(result_pos, target_items) + ssl_loss
            opti.zero_grad()
            loss.backward()
            opti.step()
            batch_losses.append(loss.cpu().detach().numpy())
            epoch_losses.append(loss.cpu().detach().numpy())
            if i % plot_num == 0:
                time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                print("[%s] [%d/%d] %d mean_batch_loss : %0.6f" % (time, epoch + 1, epochs, i, np.mean(batch_losses)))
                batch_losses = []

        # --- evaluation on the test split after every epoch ---
        model.eval()
        with torch.no_grad():
            start_test_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            print("Start predicting", start_test_time)
            rrs = [0 for _ in range(len(predict_nums))]
            hit_nums = [0 for _ in range(len(predict_nums))]
            ndcgs = [0 for _ in range(len(predict_nums))]
            seq_save = []
            label_save = []
            pre_save = []
            for i, batch_data in enumerate(dataset.get_batch(batch_size, session_length, phase="test")):
                mask_item = torch.ones_like(torch.tensor(batch_data[1])) * dataset.index_count + 1
                sessions = torch.cat([torch.tensor(batch_data[0]), mask_item], dim=-1).to(device)

                target_items = np.array(batch_data[1]) - 1
                y_pred = model.predict_top_k(sessions, 20).cpu().numpy()

                # top-k item ID number
                pre_k = y_pred + 1
                seq_temp = sessions.tolist()
                seq_save += seq_temp
                label_save += np.array(batch_data[1]).flatten().tolist()
                pre_save += pre_k.tolist()

                for j, predict_num in enumerate(predict_nums):
                    hit_nums[j] += get_hit_num(y_pred[:, :predict_num], target_items)
                    rrs[j] += get_rr(y_pred[:, :predict_num], target_items)
                    ndcgs[j] += get_ndcg(y_pred[:, :predict_num], target_items)

            end_test_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

            hrs = [hit_num / len(dataset.all_testing_data) for hit_num in hit_nums]
            mrrs = [rr / len(dataset.all_testing_data) for rr in rrs]
            mndcgs = [ndcg / len(dataset.all_testing_data) for ndcg in ndcgs]
            # Model selection criterion: HR@20 + MRR@20.
            if hrs[-1] + mrrs[-1] > best_r1m:
                # print("change best")
                best_model = deepcopy(model)
                # best_model_hr = hrs[-1]
                # best_model_mrr = mrrs[-1]
                # best_r1m = hrs[-1] + mrrs[-1]
                best_model_hr = hrs
                best_model_mrr = mrrs
                best_model_ndcg = mndcgs
                best_r1m = hrs[-1] + mrrs[-1]
                no_improvement_epoch = 0

                # Persist predictions of the best epoch for later inspection.
                pre_save_path = "./BERTHealth/" + dataset_name + "/prediction_health.txt"
                # res_pre = (seq_save, label_save, pre_save)
                res_pre = (label_save, label_save, pre_save)
                pickle.dump(res_pre, open(pre_save_path, 'wb'))
            else:
                no_improvement_epoch += 1
            print("testing finish [%s] " % end_test_time)
            for k, predict_num in enumerate(predict_nums):
                print("\tHR@%d=%.5f MRR@%d=%.5f NDCG@%d=%.5f" % (
                    predict_num, hrs[k], predict_num, mrrs[k], predict_num, mndcgs[k]))
            if no_improvement_epoch >= patience:
                print("early stopping")
                break
    end_train_time = datetime.datetime.now()



    print("training and testting over, Total time", end_train_time - start_train_time)
    return best_model, best_model_hr, best_model_mrr, best_model_ndcg
850
+
851
# --- script entry: hyper-parameter grid search over the chosen dataset ---
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# TN/QB/NERD
dataset_name = 'TN'
train_path = './BERTHealth/' + dataset_name + '/train.txt'
test_path = './BERTHealth/' + dataset_name + '/test.txt'
dataset = SessionDataSet(train_file=train_path, test_file=test_path)
# torch.cuda.set_device(1)
epochs = 50
# Single-value "grids": effectively fixed hyper-parameters per dataset.
hidden_sizes = [100]  # rr:100 dn:100
dropouts = [0.3]  # rr:0.3 dn:0.3
attention_dropouts = [0]  # rr:0 dn:0
lrs = [1e-3]  # rr:1e-3 dn:5e-4
session_lengths = [50]  # rr:50 dn:50
sa_layer_nums = [4]  # rr:4 dn:4

# lambda balances user interest and content healthiness
h_lamdba = 0.01

patience = 10
head_nums = [2]  # rr:2 dn:4, 32 dimension each head
amsgrads = [True]
best_params = ""
best_all_model = 0.0
best_all_hr = 0.0
best_all_mrr = 0.0
best_all_r1m = 0.0
print('datasets: ',dataset_name)
# hidden_sizes and head_nums are zipped pairwise, not cross-producted.
for session_length in session_lengths:
    for hidden_size, head_num in zip(hidden_sizes, head_nums):
        for amsgrad in amsgrads:
            for attention_dropout in attention_dropouts:
                for dropout in dropouts:
                    for lr in lrs:
                        for sa_layer_num in sa_layer_nums:
                            # for head_num in head_nums:
                            args = {}
                            print(
                                "current model hyper-parameters: session_length=%d, hidden_size=%d, lr=%.4f,head_num=%d, amsgrad=%s, attention_dropout=%.2f, dropout=%.2f, sa_layer_num=%d. \n" % (
                                    session_length, hidden_size, lr, head_num, str(amsgrad), attention_dropout,
                                    dropout,
                                    sa_layer_num))
                            args["session_length"] = session_length
                            args["hidden_size"] = hidden_size
                            args["amsgrad"] = amsgrad
                            args["attention_dropout"] = attention_dropout
                            args["dropout"] = dropout
                            args["sa_layer_num"] = sa_layer_num
                            args["lr"] = lr
                            args["head_num"] = head_num
                            args["patience"] = patience
                            args["datasets"] = dataset_name
                            args["lamdba"] = h_lamdba
                            best_model, best_model_hr, best_model_mrr, best_model_ndcg = train(args)
                            # if best_model_hr + best_model_mrr > best_all_r1m:
                            #     print("best model change")
                            #     best_all_r1m = best_model_hr + best_model_mrr
                            #     best_all_hr = best_model_hr
                            #     best_all_mrr = best_model_mrr
                            #     best_all_model = best_model
                            #     best_params = "session_length-%d, hidden_size-%d, lr-%.4f,head_num=%d, amsgrad-%s, attention_dropout-%.2f, dropout-%.2f, sa_layer_num-%d" % (
                            #         session_length, hidden_size, lr, head_num, str(amsgrad), attention_dropout,
                            #         dropout,
                            #         sa_layer_num)
                            #     best_model = None
                            # print(
                            #     "current model hyper-parameters: session_length=%d, hidden_size=%d, lr=%.4f,head_num=%d, amsgrad=%s, attention_dropout=%.2f, dropout=%.2f, sa_layer_num=%d. \n" % (
                            #         session_length, hidden_size, lr, head_num, str(amsgrad), attention_dropout,
                            #         dropout,
                            #         sa_layer_num))
                            print("current model hyper-parameters: session_length=%d, hidden_size=%d, head_num=%d, sa_layer_num=%d. \n" % (session_length, hidden_size, head_num, sa_layer_num))
                            # print("current model HR@20=%.5f MRR@20=%.5f." % (best_model_hr, best_model_mrr))
                            # Metric lists are aligned with predict_nums [1, 5, 10, 20].
                            print('P@1\tP@5\tM@5\tN@5\tP@10\tM@10\tN@10\tP@20\tM@20\tN@20\t')
                            print("%.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f\t %.2f" % (
                                best_model_hr[0]*100, best_model_hr[1]*100, best_model_mrr[1]*100, best_model_ndcg[1]*100, best_model_hr[2]*100, best_model_mrr[2]*100, best_model_ndcg[2]*100, best_model_hr[3]*100, best_model_mrr[3]*100, best_model_ndcg[3]*100))


# Optionally persist the trained item embeddings (padding row = 0).
# save_path = './BERTCKLLM/' + dataset_name + '/item_embs.pth'
# torch.save(best_model.item_embedding.state_dict(), save_path)
# print("The best result HR@20=%.5f MRR@20=%.5f, hyper-parameters: %s. " % (best_all_hr, best_all_mrr, best_params))
print(dataset_name)
print("over.")
HealthRec/HealthRec_code/BERT4Rec/environment.yml ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: DSAN
2
+ channels:
3
+ - defaults
4
+ dependencies:
5
+ - _libgcc_mutex=0.1=main
6
+ - _openmp_mutex=4.5=1_gnu
7
+ - _pytorch_select=0.1=cpu_0
8
+ - blas=1.0=mkl
9
+ - bottleneck=1.3.2=py39hdd57654_1
10
+ - brotli=1.0.9=he6710b0_2
11
+ - ca-certificates=2021.10.26=h06a4308_2
12
+ - certifi=2021.10.8=py39h06a4308_0
13
+ - cffi=1.14.6=py39h400218f_0
14
+ - cycler=0.10.0=py39h06a4308_0
15
+ - dbus=1.13.18=hb2f20db_0
16
+ - expat=2.4.1=h2531618_2
17
+ - fontconfig=2.13.1=h6c09931_0
18
+ - fonttools=4.25.0=pyhd3eb1b0_0
19
+ - freetype=2.11.0=h70c0345_0
20
+ - giflib=5.2.1=h7b6447c_0
21
+ - glib=2.69.1=h5202010_0
22
+ - gst-plugins-base=1.14.0=h8213a91_2
23
+ - gstreamer=1.14.0=h28cd5cc_2
24
+ - icu=58.2=he6710b0_3
25
+ - intel-openmp=2019.4=243
26
+ - jpeg=9d=h7f8727e_0
27
+ - kiwisolver=1.3.1=py39h2531618_0
28
+ - lcms2=2.12=h3be6417_0
29
+ - ld_impl_linux-64=2.35.1=h7274673_9
30
+ - libffi=3.3=he6710b0_2
31
+ - libgcc-ng=9.1.0=hdf63c60_0
32
+ - libgomp=9.3.0=h5101ec6_17
33
+ - libmklml=2019.0.5=0
34
+ - libpng=1.6.37=hbc83047_0
35
+ - libstdcxx-ng=9.1.0=hdf63c60_0
36
+ - libtiff=4.2.0=h85742a9_0
37
+ - libuuid=1.0.3=h7f8727e_2
38
+ - libwebp=1.2.0=h89dd481_0
39
+ - libwebp-base=1.2.0=h27cfd23_0
40
+ - libxcb=1.14=h7b6447c_0
41
+ - libxml2=2.9.10=hb55368b_3
42
+ - lz4-c=1.9.3=h295c915_1
43
+ - matplotlib=3.4.3=py39h06a4308_0
44
+ - matplotlib-base=3.4.3=py39hbbc1b5f_0
45
+ - mkl=2020.2=256
46
+ - mkl-service=2.3.0=py39he8ac12f_0
47
+ - mkl_fft=1.3.0=py39h54f3939_0
48
+ - mkl_random=1.0.2=py39h63df603_0
49
+ - munkres=1.1.4=py_0
50
+ - ncurses=6.3=h7f8727e_2
51
+ - ninja=1.10.2=py39hd09550d_3
52
+ - numexpr=2.7.3=py39hb2eb853_0
53
+ - numpy=1.19.2=py39h89c1606_0
54
+ - numpy-base=1.19.2=py39h2ae0177_0
55
+ - olefile=0.46=pyhd3eb1b0_0
56
+ - openssl=1.1.1l=h7f8727e_0
57
+ - pandas=1.3.4=py39h8c16a72_0
58
+ - pcre=8.45=h295c915_0
59
+ - pillow=8.4.0=py39h5aabda8_0
60
+ - pip=21.2.4=py39h06a4308_0
61
+ - pycparser=2.21=pyhd3eb1b0_0
62
+ - pyparsing=3.0.4=pyhd3eb1b0_0
63
+ - pyqt=5.9.2=py39h2531618_6
64
+ - python=3.9.7=h12debd9_1
65
+ - python-dateutil=2.8.2=pyhd3eb1b0_0
66
+ - pytz=2021.3=pyhd3eb1b0_0
67
+ - qt=5.9.7=h5867ecd_1
68
+ - readline=8.1=h27cfd23_0
69
+ - setuptools=58.0.4=py39h06a4308_0
70
+ - sip=4.19.13=py39h2531618_0
71
+ - six=1.16.0=pyhd3eb1b0_0
72
+ - sqlite=3.36.0=hc218d9a_0
73
+ - tk=8.6.11=h1ccaba5_0
74
+ - tornado=6.1=py39h27cfd23_0
75
+ - tqdm=4.62.3=pyhd3eb1b0_1
76
+ - typing-extensions=3.10.0.2=hd3eb1b0_0
77
+ - typing_extensions=3.10.0.2=pyh06a4308_0
78
+ - tzdata=2021e=hda174b7_0
79
+ - wheel=0.37.0=pyhd3eb1b0_1
80
+ - xz=5.2.5=h7b6447c_0
81
+ - zlib=1.2.11=h7b6447c_3
82
+ - zstd=1.4.9=haebb681_0
83
+ - pip:
84
+ - torch==1.8.0+cu111
85
+ - torchaudio==0.8.0
86
+ - torchvision==0.9.0+cu111
87
+ prefix: /home/dutir923/zhangxiaokun/anaconda3/envs/DSAN
88
+
HealthRec/HealthRec_code/GRU4Rec/dataset.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ create on 18 Sep, 2019
4
+
5
+ @author: wangshuo
6
+
7
+ Reference: https://github.com/lijingsdu/sessionRec_NARM/blob/master/data_process.py
8
+ """
9
+
10
+ import pickle
11
+ import torch
12
+ from torch.utils.data import Dataset
13
+ import numpy as np
14
+
15
+
16
def load_data(root, valid_portion=0.1, maxlen=200, sort_by_len=False, test_lab='text.txt'):
    '''Loads the dataset and splits it into train / validation / test.

    :type root: String
    :param root: directory containing the pickled train/test files
    :type valid_portion: float
    :param valid_portion: The proportion of the full train set used for
        the validation set.
    :type maxlen: None or positive int
    :param maxlen: the max sequence length we use in the train/valid set.
    :type sort_by_len: bool
    :param sort_by_len: Sort by the sequence length for the
        valid and test set. This allows faster execution as it causes
        less padding per minibatch. Another mechanism must be used to
        shuffle the train set at each epoch.
    :param test_lab: filename of the test pickle under ``root``.
        NOTE(review): the default 'text.txt' looks like a typo for
        'test.txt' (the file actually shipped with the datasets) — callers
        appear to pass it explicitly; verify before relying on the default.

    Returns (train, valid, test), each a (sessions, labels) pair.
    '''

    # Load the dataset (both files are pickled (sessions, labels) tuples).
    path_train_data = root + 'train.txt'
    path_test_data = root + test_lab
    with open(path_train_data, 'rb') as f1:
        train_set = pickle.load(f1)

    with open(path_test_data, 'rb') as f2:
        test_set = pickle.load(f2)

    if maxlen:
        new_train_set_x = []
        new_train_set_y = []
        for x, y in zip(train_set[0], train_set[1]):
            # dawn
            # Sequences longer than maxlen are truncated, and the item at
            # position maxlen (the first item cut off) REPLACES the original
            # label y. NOTE(review): deliberate per the commented-out
            # original below, but worth confirming — it discards y.
            if len(x) <= maxlen:
                new_train_set_x.append(x)
                new_train_set_y.append(y)
            else:
                new_train_set_x.append(x[:maxlen])
                new_train_set_y.append(x[maxlen])
            # if len(x) < maxlen:
            #     new_train_set_x.append(x)
            #     new_train_set_y.append(y)
            # else:
            #     new_train_set_x.append(x[:maxlen])
            #     new_train_set_y.append(y)
        train_set = (new_train_set_x, new_train_set_y)
        del new_train_set_x, new_train_set_y

        new_test_set_x = []
        new_test_set_y = []
        for xx, yy in zip(test_set[0], test_set[1]):
            # dawn
            # Same truncation/label-replacement policy as the train split.
            if len(xx) <= maxlen:
                new_test_set_x.append(xx)
                new_test_set_y.append(yy)
            else:
                new_test_set_x.append(xx[:maxlen])
                new_test_set_y.append(xx[maxlen])
            # if len(xx) < maxlen:
            #     new_test_set_x.append(xx)
            #     new_test_set_y.append(yy)
            # else:
            #     new_test_set_x.append(xx[:maxlen])
            #     new_test_set_y.append(yy)
        test_set = (new_test_set_x, new_test_set_y)
        del new_test_set_x, new_test_set_y

    # split training set into validation set (random shuffle, last
    # valid_portion fraction becomes validation)
    train_set_x, train_set_y = train_set
    n_samples = len(train_set_x)
    sidx = np.arange(n_samples, dtype='int32')
    np.random.shuffle(sidx)
    n_train = int(np.round(n_samples * (1. - valid_portion)))
    valid_set_x = [train_set_x[s] for s in sidx[n_train:]]
    valid_set_y = [train_set_y[s] for s in sidx[n_train:]]
    train_set_x = [train_set_x[s] for s in sidx[:n_train]]
    train_set_y = [train_set_y[s] for s in sidx[:n_train]]

    (test_set_x, test_set_y) = test_set

    def len_argsort(seq):
        # Indices that would sort the sequences by length (ascending).
        return sorted(range(len(seq)), key=lambda x: len(seq[x]))

    if sort_by_len:
        sorted_index = len_argsort(test_set_x)
        test_set_x = [test_set_x[i] for i in sorted_index]
        test_set_y = [test_set_y[i] for i in sorted_index]

        sorted_index = len_argsort(valid_set_x)
        valid_set_x = [valid_set_x[i] for i in sorted_index]
        valid_set_y = [valid_set_y[i] for i in sorted_index]

    train = (train_set_x, train_set_y)
    valid = (valid_set_x, valid_set_y)
    test = (test_set_x, test_set_y)

    return train, valid, test
114
+
115
+
116
class RecSysDataset(Dataset):
    """PyTorch Dataset wrapping (sessions, targets) pairs for the yoochoose
    and diginetica session-based recommendation datasets.

    ``data`` is a 2-tuple: data[0] holds the item-id sequences, data[1] the
    corresponding target item ids (stored 1-based; returned 0-based).
    """

    def __init__(self, data):
        self.data = data
        banner = '-' * 50
        print(banner)
        print('Dataset info:')
        print('Number of sessions: {}'.format(len(data[0])))
        print(banner)

    def __getitem__(self, index):
        # Targets are stored 1-based; shift to 0-based class indices.
        return self.data[0][index], self.data[1][index] - 1

    def __len__(self):
        return len(self.data[0])
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/H_neg_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f8abe18978a3deec718b230960d48084285fdf16ef777f645fe9d3fa2ab77b9
3
+ size 3200
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/H_pos_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dde3f20ab1ee7b4d323c8c8a8af58ef19148a2f439b67c675c004777b6274a6
3
+ size 3200
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/H_reason_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8758eb8e206047fbdf707cf36202577d679e62b5ec4cc7a54d15c695fedefa2b
3
+ size 8263328
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/H_title_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5706ac3c6e8273608204edde9ff8df523719c5bfbf7d79d8d49044d63689d1c6
3
+ size 8263328
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/test.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed13fb2f8febb707372b1f7956608ad5c8981eca18d70743712ceff3fe63d68b
3
+ size 699182
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/NERD/train.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b733a5ff31d947af67df8408c7557a02c35c1f9d2cee25cfc4e8c861a4801e30
3
+ size 6312423
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/H_neg_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f8abe18978a3deec718b230960d48084285fdf16ef777f645fe9d3fa2ab77b9
3
+ size 3200
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/H_pos_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dde3f20ab1ee7b4d323c8c8a8af58ef19148a2f439b67c675c004777b6274a6
3
+ size 3200
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/H_reason_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e48330dbc4c2fca6d95ac7e17a181e5824bb602cf2c93996670642254eee26a5
3
+ size 4751328
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/H_title_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6adfa8e769d49b084bb341b2f4c9b7af25c25beb9fb79e5eaeab46a134b8a646
3
+ size 4751328
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/test.txt ADDED
Binary file (47.7 kB). View file
 
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/QB/train.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ea60f4f6f5dff12f719addac67ba58232dcbf8c365ebe31988f1e60d65c04b6
3
+ size 416072
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/H_neg_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f8abe18978a3deec718b230960d48084285fdf16ef777f645fe9d3fa2ab77b9
3
+ size 3200
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/H_pos_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dde3f20ab1ee7b4d323c8c8a8af58ef19148a2f439b67c675c004777b6274a6
3
+ size 3200
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/H_reason_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bfb9acc77f001da4f778233a7713e34355df5b809841dff3a5229b2e0815f53b
3
+ size 2604128
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/H_title_emb100.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:818d7d4cb5d306c1d8d6ceb4b13efee47d2686e5d6d8a71df44ce89fc5ffe658
3
+ size 2604128
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/test.txt ADDED
Binary file (40.2 kB). View file
 
HealthRec/HealthRec_code/GRU4Rec/datasetsHealth/TN/train.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45f7052b0cb6052986ce5a5f3cf87011f08290f09f2f48f310ad459a568e3ce7
3
+ size 393873
HealthRec/HealthRec_code/GRU4Rec/gru4rec.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ import torch.nn as nn
4
+
5
+ import torch.nn.functional as F
6
+ from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
7
+
8
+
9
+ class GRU4Rec(nn.Module):
10
+ """
11
+ n_items(int): the number of items
12
+ hidden_size(int): the hidden size of gru
13
+ embedding_dim(int): the dimension of item embedding
14
+ batch_size(int):
15
+ n_layers(int): the number of gru layers
16
+ """
17
+
18
+ def __init__(self, n_items, hidden_size, embedding_dim, batch_size, datasets, n_layers=1):
19
+ super(GRU4Rec, self).__init__()
20
+ self.n_items = n_items
21
+ self.hidden_size = hidden_size
22
+ self.batch_size = batch_size
23
+ self.n_layers = n_layers
24
+ self.embedding_dim = embedding_dim
25
+ self.emb = nn.Embedding(self.n_items, self.embedding_dim, padding_idx=0)
26
+ text_emb_path = './datasetsHealth/' + datasets + '/H_title_emb100.npy'
27
+ textWeights = np.load(text_emb_path)
28
+ self.emb.weight.data.copy_(torch.from_numpy(textWeights))
29
+ self.emb_dropout = nn.Dropout(0.25)
30
+ self.gru = nn.GRU(self.embedding_dim, self.hidden_size, self.n_layers)
31
+ self.a_1 = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
32
+ self.a_2 = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
33
+ self.v_t = nn.Linear(self.hidden_size, 1, bias=False)
34
+ self.ct_dropout = nn.Dropout(0.5)
35
+ self.b = nn.Linear(self.embedding_dim, 2 * self.hidden_size, bias=False)
36
+ self.sf = nn.Softmax()
37
+ self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
38
+
39
+ def forward(self, seq, lengths):
40
+ hidden = self.init_hidden(seq.size(1))
41
+ embs = self.emb_dropout(self.emb(seq))
42
+ embs = pack_padded_sequence(embs, lengths)
43
+ gru_out, hidden = self.gru(embs, hidden)
44
+ gru_out, lengths = pad_packed_sequence(gru_out)
45
+
46
+ # fetch the last hidden state of last timestamp
47
+ ht = hidden[-1]
48
+ gru_out = gru_out.permute(1, 0, 2)
49
+
50
+ c_global = ht
51
+
52
+ item_embs = self.emb(torch.arange(self.n_items).to(self.device))
53
+ scores = torch.matmul(c_global, item_embs.permute(1, 0))
54
+ # scores = self.sf(scores)
55
+
56
+ return scores
57
+
58
+ def init_hidden(self, batch_size):
59
+ return torch.zeros((self.n_layers, batch_size, self.hidden_size), requires_grad=True).to(self.device)
HealthRec/HealthRec_code/GRU4Rec/healthRec.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import numpy as np
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
7
+
8
+
9
+
10
+ class healthRec(nn.Module):
11
+
12
+ def __init__(self, n_items, hidden_size, embedding_dim, batch_size, datasets, ui_lambda, item_lambda, user_lambda, n_layers=1):
13
+ super(healthRec, self).__init__()
14
+ self.n_items = n_items
15
+ self.hidden_size = hidden_size
16
+ self.batch_size = batch_size
17
+ self.n_layers = n_layers
18
+ self.embedding_dim = embedding_dim
19
+ self.emb = nn.Embedding(self.n_items, self.embedding_dim, padding_idx=0)
20
+
21
+ self.emb_healthy = nn.Embedding(2, 768, padding_idx=0)
22
+ self.emb_harmful = nn.Embedding(2, 768, padding_idx=0)
23
+ self.emb_reason = nn.Embedding(self.n_items, self.embedding_dim, padding_idx=0)
24
+
25
+ self.ui_lambda = ui_lambda
26
+ self.item_lambda = item_lambda
27
+ self.user_lambda = user_lambda
28
+
29
+ text_emb_path = './datasetsHealth/' + datasets + '/H_title_emb100.npy'
30
+ textWeights = np.load(text_emb_path)
31
+ self.emb.weight.data.copy_(torch.from_numpy(textWeights))
32
+
33
+ reason_emb_path = './datasetsHealth/' + datasets + '/H_reason_emb100.npy'
34
+ reasonWeights = np.load(reason_emb_path)
35
+ self.emb_reason.weight.data.copy_(torch.from_numpy(reasonWeights))
36
+
37
+ self.emb_reason.weight.requires_grad = False
38
+
39
+ health_emb_path = './datasetsHealth/' + datasets + '/H_pos_emb100.npy'
40
+ healWeights = np.load(health_emb_path)
41
+ self.emb_healthy.weight.data.copy_(torch.from_numpy(healWeights))
42
+
43
+ self.emb_healthy.weight.requires_grad = False
44
+
45
+ harm_emb_path = './datasetsHealth/' + datasets + '/H_neg_emb100.npy'
46
+ harmWeights = np.load(harm_emb_path)
47
+ self.emb_harmful.weight.data.copy_(torch.from_numpy(harmWeights))
48
+
49
+ self.emb_harmful.weight.requires_grad = False
50
+
51
+ self.dense_text_health = nn.Linear(768, self.embedding_dim)
52
+ self.dense_text_harm = nn.Linear(768, self.embedding_dim)
53
+
54
+ self.cos_sim = nn.CosineSimilarity(dim=-1)
55
+ self.ul_W1 = nn.Linear(self.embedding_dim, self.embedding_dim)
56
+ self.ul_W2 = nn.Linear(self.embedding_dim, self.embedding_dim)
57
+ self.ul_W3 = nn.Linear(self.embedding_dim, self.embedding_dim)
58
+
59
+ self.merge_item = nn.Linear(self.embedding_dim, self.embedding_dim)
60
+
61
+ # self_attention
62
+ num_heads = 4
63
+ if self.embedding_dim % num_heads != 0: # 整除
64
+ raise ValueError(
65
+ "The hidden size (%d) is not a multiple of the number of attention "
66
+ "heads (%d)" % (self.embedding_dim, num_heads))
67
+ # 参数定义
68
+ self.num_heads = num_heads # 4
69
+ self.attention_head_size = int(self.embedding_dim / self.num_heads) # 16 每个注意力头的维度
70
+ self.all_head_size = int(self.num_heads * self.attention_head_size)
71
+ # query, key, value 的线性变换(上述公式2)
72
+ self.query = nn.Linear(self.embedding_dim, self.embedding_dim) # 128, 128
73
+ self.key = nn.Linear(self.embedding_dim, self.embedding_dim)
74
+ self.value = nn.Linear(self.embedding_dim, self.embedding_dim)
75
+
76
+
77
+ self.emb_dropout = nn.Dropout(0.25)
78
+ self.dropout = nn.Dropout(0.1)
79
+
80
+ self.gru = nn.GRU(self.embedding_dim, self.hidden_size, self.n_layers)
81
+ self.a_1 = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
82
+ self.a_2 = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
83
+ self.v_t = nn.Linear(self.hidden_size, 1, bias=False)
84
+ self.ct_dropout = nn.Dropout(0.5)
85
+ self.b = nn.Linear(self.embedding_dim, 2 * self.hidden_size, bias=False)
86
+ self.sf = nn.Softmax()
87
+ self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
88
+
89
+ def transpose_for_scores(self, x, attention_head_size):
90
+ # INPUT: x'shape = [bs, seqlen, hid_size] 假设hid_size=128
91
+ new_x_shape = x.size()[:-1] + (self.num_heads, attention_head_size) # [bs, seqlen, 8, 16]
92
+ x = x.view(*new_x_shape) #
93
+ return x.permute(0, 2, 1, 3)
94
+
95
+ def user_loss(self, user_emb, health_emb, harm_emb):
96
+ # health_sim = self.cos_sim(self.ul_W1(user_emb), self.ul_W2(health_emb))
97
+ # harm_sim = self.cos_sim(self.ul_W1(user_emb), self.ul_W3(harm_emb))
98
+
99
+ health_sim = self.ul_W1(user_emb) * self.ul_W2(health_emb)
100
+ health_sim = torch.sum(health_sim, -1)
101
+ harm_sim = self.ul_W1(user_emb) * self.ul_W3(harm_emb)
102
+ harm_sim = torch.sum(harm_sim, -1)
103
+
104
+ # health_sim = user_emb * health_emb
105
+ # health_sim = torch.sum(health_sim, -1)
106
+ # harm_sim = user_emb + harm_emb
107
+ # harm_sim = torch.sum(harm_sim, -1)
108
+
109
+ ssl_loss = torch.log10(torch.exp(health_sim)) - torch.log10(torch.exp(health_sim) + torch.exp(harm_sim))
110
+ ssl_loss = torch.sum(ssl_loss, 0)
111
+ return -ssl_loss
112
+
113
+ def forward(self, seq, lengths):
114
+ hidden = self.init_hidden(seq.size(1))
115
+ embs = self.emb_dropout(self.emb(seq))
116
+ embs = pack_padded_sequence(embs, lengths)
117
+ gru_out, hidden = self.gru(embs, hidden)
118
+ gru_out, lengths = pad_packed_sequence(gru_out)
119
+
120
+
121
+ # fetch the last hidden state of last timestamp
122
+ ht = hidden[-1]
123
+ gru_out = gru_out.permute(1, 0, 2)
124
+
125
+ c_global = ht
126
+
127
+ mask = torch.where(seq.permute(1, 0) > 0, torch.tensor([1.], device=self.device),
128
+ torch.tensor([0.], device=self.device))
129
+
130
+ item_embs = self.emb(torch.arange(1, self.n_items).to(self.device))
131
+ scores_rec = torch.matmul(c_global, item_embs.permute(1, 0))
132
+ # scores_rec = torch.matmul(c_t, item_embs.permute(1, 0))
133
+
134
+ # Self-attention healthy
135
+ mask_h = mask.float().unsqueeze(-1)
136
+ attention_mask = mask_h.permute(0, 2, 1).unsqueeze(1) # [bs, 1, 1, seqlen] 增加维度
137
+ attention_mask = (1.0 - attention_mask) * -10000.0
138
+
139
+ seq_h = seq.permute(1, 0)
140
+ item_f = self.emb_reason(seq_h)
141
+ K_emb = item_f
142
+ V_emb = item_f
143
+ all_health = torch.cuda.LongTensor(list(K_emb.shape)[0], list(K_emb.shape)[1]).fill_(1)
144
+ Q_emb = self.emb_healthy(all_health)
145
+ Q_emb = self.dense_text_health(Q_emb)
146
+
147
+ mixed_query_layer = self.query(Q_emb) # [bs, seqlen, hid_size]
148
+ mixed_key_layer = self.key(K_emb) # [bs, seqlen, hid_size]
149
+ mixed_value_layer = self.value(V_emb) # [bs, seqlen, hid_size]
150
+
151
+ attention_head_size = int(self.embedding_dim / self.num_heads)
152
+ query_layer = self.transpose_for_scores(mixed_query_layer, attention_head_size) # [bs, 8, seqlen, 16]
153
+ key_layer = self.transpose_for_scores(mixed_key_layer, attention_head_size)
154
+ value_layer = self.transpose_for_scores(mixed_value_layer, attention_head_size) # [bs, 8, seqlen, 16]
155
+ # Take the dot product between "query" and "key" to get the raw attention scores.
156
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
157
+ # [bs, 8, seqlen, 16]*[bs, 8, 16, seqlen] ==> [bs, 8, seqlen, seqlen]
158
+ attention_scores = attention_scores / math.sqrt(attention_head_size) # [bs, 8, seqlen, seqlen]
159
+ attention_scores = attention_scores + attention_mask
160
+ attention_probs = nn.Softmax(dim=-1)(attention_scores) # [bs, 8, seqlen, seqlen]
161
+ # This is actually dropping out entire tokens to attend to, which might
162
+ # seem a bit unusual, but is taken from the original Transformer paper.
163
+ attention_probs = self.dropout(attention_probs)
164
+
165
+ # 矩阵相乘,[bs, 8, seqlen, seqlen]*[bs, 8, seqlen, 16] = [bs, 8, seqlen, 16]
166
+ context_layer = torch.matmul(attention_probs, value_layer) # [bs, 8, seqlen, 16]
167
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous() # [bs, seqlen, 8, 16]
168
+ new_context_layer_shape = context_layer.size()[:-2] + (self.embedding_dim,) # [bs, seqlen, 128]
169
+ sa_result = context_layer.view(*new_context_layer_shape)
170
+ # last hidden state
171
+ mask_h = mask.long().unsqueeze(-1)
172
+ item_pos = torch.tensor(range(1, V_emb.size()[1] + 1), device='cuda')
173
+ item_pos = item_pos.unsqueeze(0).expand_as(seq_h)
174
+ item_pos = item_pos * mask_h.squeeze(2)
175
+ item_last_num = torch.max(item_pos, 1)[0].unsqueeze(1).expand_as(item_pos)
176
+ last_pos_t = torch.where(item_pos - item_last_num >= 0, torch.tensor([1.0], device='cuda'),
177
+ torch.tensor([0.0], device='cuda'))
178
+ as_last_unit = last_pos_t.unsqueeze(2).expand_as(sa_result) * sa_result
179
+ user_h = torch.sum(as_last_unit, 1)
180
+
181
+ # item_embs_health = self.emb_reason(torch.arange(self.n_items).to(self.device))
182
+ # scores_health = torch.matmul(user_h, item_embs_health.permute(1, 0))
183
+ # scores = scores_rec + self.h_lambda*scores_health
184
+
185
+ item_embs_reason = self.emb_reason(torch.arange(1, self.n_items).to(self.device))
186
+
187
+ item_merge = item_embs_reason
188
+
189
+ user_health = torch.cuda.LongTensor(list(user_h.shape)[0]).fill_(1)
190
+ user_health_emb = self.emb_healthy(user_health)
191
+ user_health_emb = self.dense_text_health(user_health_emb)
192
+
193
+ scores_ui = torch.matmul(user_h, item_merge.permute(1, 0))
194
+ scores_item = torch.matmul(user_health_emb, item_merge.permute(1, 0))
195
+ scores = scores_rec
196
+ # scores = scores_rec
197
+ # scores = self.sf(scores)
198
+
199
+ # ssl loss
200
+
201
+ u_index = torch.cuda.LongTensor(list(user_h.shape)[0]).fill_(1)
202
+
203
+ u_health_emb = self.emb_healthy(u_index)
204
+ u_health_emb = self.dense_text_health(u_health_emb)
205
+
206
+ u_harm_emb = self.emb_harmful(u_index)
207
+ u_harm_emb = self.dense_text_harm(u_harm_emb)
208
+
209
+ ssl_loss = self.user_lambda*self.user_loss(user_h, u_health_emb, u_harm_emb)
210
+
211
+
212
+ return scores, ssl_loss, self.item_lambda * scores_item + self.ui_lambda * scores_ui
213
+
214
+ def init_hidden(self, batch_size):
215
+ return torch.zeros((self.n_layers, batch_size, self.hidden_size), requires_grad=True).to(self.device)