Koulb commited on
Commit
5708b9f
·
verified ·
1 Parent(s): d975267

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. 2_training/hamiltonian/_eval_launcher.py +12 -0
  2. 2_training/hamiltonian/infer_sc/dataset/00/element.dat +16 -0
  3. 2_training/hamiltonian/infer_sc/dataset/00/info.json +1 -0
  4. 2_training/hamiltonian/infer_sc/dataset/00/lat.dat +3 -0
  5. 2_training/hamiltonian/infer_sc/dataset/00/orbital_types.dat +16 -0
  6. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/config.ini +82 -0
  7. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/result.txt +86 -0
  8. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__init__.py +10 -0
  9. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/__init__.cpython-312.pyc +0 -0
  10. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/data.cpython-312.pyc +0 -0
  11. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/graph.cpython-312.pyc +0 -0
  12. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/kernel.cpython-312.pyc +0 -0
  13. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/model.cpython-312.pyc +0 -0
  14. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/rotate.cpython-312.pyc +0 -0
  15. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/utils.cpython-312.pyc +0 -0
  16. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/data.py +217 -0
  17. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/default.ini +88 -0
  18. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_HermNet/__init__.py +1 -0
  19. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_HermNet/__pycache__/__init__.cpython-312.pyc +0 -0
  20. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_HermNet/__pycache__/rmnet.cpython-312.pyc +0 -0
  21. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_HermNet/license.txt +1 -0
  22. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_HermNet/rmnet.py +105 -0
  23. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__init__.py +2 -0
  24. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__pycache__/__init__.cpython-312.pyc +0 -0
  25. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__pycache__/diff_group_norm.cpython-312.pyc +0 -0
  26. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__pycache__/graph_norm.cpython-312.pyc +0 -0
  27. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/diff_group_norm.py +109 -0
  28. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/graph_norm.py +60 -0
  29. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/license.txt +22 -0
  30. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/__init__.py +1 -0
  31. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/__pycache__/__init__.cpython-312.pyc +0 -0
  32. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/__pycache__/lattice.cpython-312.pyc +0 -0
  33. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/lattice.py +71 -0
  34. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/license.txt +22 -0
  35. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/__init__.py +1 -0
  36. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/__pycache__/__init__.cpython-312.pyc +0 -0
  37. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/__pycache__/acsf.cpython-312.pyc +0 -0
  38. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/acsf.py +50 -0
  39. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/license.txt +35 -0
  40. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/__init__.py +1 -0
  41. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/__pycache__/__init__.cpython-312.pyc +0 -0
  42. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/__pycache__/representations.cpython-312.pyc +0 -0
  43. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/license.txt +24 -0
  44. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/representations.py +204 -0
  45. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/graph.py +934 -0
  46. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/inference/__init__.py +1 -0
  47. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/inference/__pycache__/__init__.cpython-312.pyc +0 -0
  48. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/inference/__pycache__/pred_ham.cpython-312.pyc +0 -0
  49. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/inference/band_config.json +8 -0
  50. 2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/inference/dense_calc.jl +234 -0
2_training/hamiltonian/_eval_launcher.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys, torch
2
+ torch.serialization.add_safe_globals([slice])
3
+ try:
4
+ from torch_geometric.data.data import DataEdgeAttr, DataTensorAttr
5
+ from torch_geometric.data.storage import GlobalStorage
6
+ torch.serialization.add_safe_globals([DataEdgeAttr, DataTensorAttr, GlobalStorage])
7
+ except ImportError:
8
+ pass
9
+ sys.path.insert(0, '/home/apolyukhin/Development/DeepH-E3')
10
+ from deephe3 import DeepHE3Kernel
11
+ kernel = DeepHE3Kernel()
12
+ kernel.eval('/home/apolyukhin/Development/epc_ml/example/diamond/2_training/hamiltonian/infer_sc/eval.ini')
2_training/hamiltonian/infer_sc/dataset/00/element.dat ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 6
2
+ 6
3
+ 6
4
+ 6
5
+ 6
6
+ 6
7
+ 6
8
+ 6
9
+ 6
10
+ 6
11
+ 6
12
+ 6
13
+ 6
14
+ 6
15
+ 6
16
+ 6
2_training/hamiltonian/infer_sc/dataset/00/info.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"isspinful": false}
2_training/hamiltonian/infer_sc/dataset/00/lat.dat ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ 0.000000000000000000e+00 3.566999997713848014e+00 3.566999997713848014e+00
2
+ 3.566999997713848014e+00 0.000000000000000000e+00 3.566999997713848014e+00
3
+ 3.566999997713848014e+00 3.566999997713848014e+00 0.000000000000000000e+00
2_training/hamiltonian/infer_sc/dataset/00/orbital_types.dat ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 0 0 1 1 2
2
+ 0 0 1 1 2
3
+ 0 0 1 1 2
4
+ 0 0 1 1 2
5
+ 0 0 1 1 2
6
+ 0 0 1 1 2
7
+ 0 0 1 1 2
8
+ 0 0 1 1 2
9
+ 0 0 1 1 2
10
+ 0 0 1 1 2
11
+ 0 0 1 1 2
12
+ 0 0 1 1 2
13
+ 0 0 1 1 2
14
+ 0 0 1 1 2
15
+ 0 0 1 1 2
16
+ 0 0 1 1 2
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/config.ini ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [basic]
2
+ graph_dir = /home/apolyukhin/scripts/ml/diamond-qe/deeph-data/graph
3
+ save_dir = /home/apolyukhin/scripts/ml/diamond-qe/pristine-222/reconstruction/aohamiltonian/pred_ham_std
4
+ raw_dir = /home/apolyukhin/scripts/ml/diamond-qe/deeph-data/preprocess
5
+ dataset_name = diamond_qe
6
+ only_get_graph = False
7
+ interface = h5
8
+ target = hamiltonian
9
+ disable_cuda = True
10
+ device = cpu
11
+ num_threads = -1
12
+ save_to_time_folder = False
13
+ save_csv = True
14
+ tb_writer = False
15
+ seed = 42
16
+ multiprocessing = 0
17
+ orbital = [{"6 6": [0, 0]}, {"6 6": [0, 1]}, {"6 6": [0, 2]}, {"6 6": [0, 3]}, {"6 6": [0, 4]}, {"6 6": [0, 5]}, {"6 6": [0, 6]}, {"6 6": [0, 7]}, {"6 6": [0, 8]}, {"6 6": [0, 9]}, {"6 6": [0, 10]}, {"6 6": [0, 11]}, {"6 6": [0, 12]}, {"6 6": [1, 0]}, {"6 6": [1, 1]}, {"6 6": [1, 2]}, {"6 6": [1, 3]}, {"6 6": [1, 4]}, {"6 6": [1, 5]}, {"6 6": [1, 6]}, {"6 6": [1, 7]}, {"6 6": [1, 8]}, {"6 6": [1, 9]}, {"6 6": [1, 10]}, {"6 6": [1, 11]}, {"6 6": [1, 12]}, {"6 6": [2, 0]}, {"6 6": [2, 1]}, {"6 6": [2, 2]}, {"6 6": [2, 3]}, {"6 6": [2, 4]}, {"6 6": [2, 5]}, {"6 6": [2, 6]}, {"6 6": [2, 7]}, {"6 6": [2, 8]}, {"6 6": [2, 9]}, {"6 6": [2, 10]}, {"6 6": [2, 11]}, {"6 6": [2, 12]}, {"6 6": [3, 0]}, {"6 6": [3, 1]}, {"6 6": [3, 2]}, {"6 6": [3, 3]}, {"6 6": [3, 4]}, {"6 6": [3, 5]}, {"6 6": [3, 6]}, {"6 6": [3, 7]}, {"6 6": [3, 8]}, {"6 6": [3, 9]}, {"6 6": [3, 10]}, {"6 6": [3, 11]}, {"6 6": [3, 12]}, {"6 6": [4, 0]}, {"6 6": [4, 1]}, {"6 6": [4, 2]}, {"6 6": [4, 3]}, {"6 6": [4, 4]}, {"6 6": [4, 5]}, {"6 6": [4, 6]}, {"6 6": [4, 7]}, {"6 6": [4, 8]}, {"6 6": [4, 9]}, {"6 6": [4, 10]}, {"6 6": [4, 11]}, {"6 6": [4, 12]}, {"6 6": [5, 0]}, {"6 6": [5, 1]}, {"6 6": [5, 2]}, {"6 6": [5, 3]}, {"6 6": [5, 4]}, {"6 6": [5, 5]}, {"6 6": [5, 6]}, {"6 6": [5, 7]}, {"6 6": [5, 8]}, {"6 6": [5, 9]}, {"6 6": [5, 10]}, {"6 6": [5, 11]}, {"6 6": [5, 12]}, {"6 6": [6, 0]}, {"6 6": [6, 1]}, {"6 6": [6, 2]}, {"6 6": [6, 3]}, {"6 6": [6, 4]}, {"6 6": [6, 5]}, {"6 6": [6, 6]}, {"6 6": [6, 7]}, {"6 6": [6, 8]}, {"6 6": [6, 9]}, {"6 6": [6, 10]}, {"6 6": [6, 11]}, {"6 6": [6, 12]}, {"6 6": [7, 0]}, {"6 6": [7, 1]}, {"6 6": [7, 2]}, {"6 6": [7, 3]}, {"6 6": [7, 4]}, {"6 6": [7, 5]}, {"6 6": [7, 6]}, {"6 6": [7, 7]}, {"6 6": [7, 8]}, {"6 6": [7, 9]}, {"6 6": [7, 10]}, {"6 6": [7, 11]}, {"6 6": [7, 12]}, {"6 6": [8, 0]}, {"6 6": [8, 1]}, {"6 6": [8, 2]}, {"6 6": [8, 3]}, {"6 6": [8, 4]}, {"6 6": [8, 5]}, {"6 6": [8, 6]}, {"6 6": [8, 7]}, {"6 6": [8, 8]}, {"6 6": [8, 9]}, {"6 6": [8, 10]}, {"6 
6": [8, 11]}, {"6 6": [8, 12]}, {"6 6": [9, 0]}, {"6 6": [9, 1]}, {"6 6": [9, 2]}, {"6 6": [9, 3]}, {"6 6": [9, 4]}, {"6 6": [9, 5]}, {"6 6": [9, 6]}, {"6 6": [9, 7]}, {"6 6": [9, 8]}, {"6 6": [9, 9]}, {"6 6": [9, 10]}, {"6 6": [9, 11]}, {"6 6": [9, 12]}, {"6 6": [10, 0]}, {"6 6": [10, 1]}, {"6 6": [10, 2]}, {"6 6": [10, 3]}, {"6 6": [10, 4]}, {"6 6": [10, 5]}, {"6 6": [10, 6]}, {"6 6": [10, 7]}, {"6 6": [10, 8]}, {"6 6": [10, 9]}, {"6 6": [10, 10]}, {"6 6": [10, 11]}, {"6 6": [10, 12]}, {"6 6": [11, 0]}, {"6 6": [11, 1]}, {"6 6": [11, 2]}, {"6 6": [11, 3]}, {"6 6": [11, 4]}, {"6 6": [11, 5]}, {"6 6": [11, 6]}, {"6 6": [11, 7]}, {"6 6": [11, 8]}, {"6 6": [11, 9]}, {"6 6": [11, 10]}, {"6 6": [11, 11]}, {"6 6": [11, 12]}, {"6 6": [12, 0]}, {"6 6": [12, 1]}, {"6 6": [12, 2]}, {"6 6": [12, 3]}, {"6 6": [12, 4]}, {"6 6": [12, 5]}, {"6 6": [12, 6]}, {"6 6": [12, 7]}, {"6 6": [12, 8]}, {"6 6": [12, 9]}, {"6 6": [12, 10]}, {"6 6": [12, 11]}, {"6 6": [12, 12]}]
18
+ o_component = H
19
+ energy_component = summation
20
+ max_element = -1
21
+ statistics = False
22
+ normalizer = False
23
+ boxcox = False
24
+
25
+ [graph]
26
+ radius = -1.0
27
+ max_num_nbr = 0
28
+ create_from_dft = True
29
+ if_lcmp_graph = True
30
+ separate_onsite = False
31
+ new_sp = False
32
+
33
+ [train]
34
+ epochs = 5000
35
+ pretrained =
36
+ resume =
37
+ train_ratio = 0.6
38
+ val_ratio = 0.2
39
+ test_ratio = 0.2
40
+ early_stopping_loss = 0.0
41
+ early_stopping_loss_epoch = [0.000000, 500]
42
+ revert_then_decay = True
43
+ revert_threshold = 30
44
+ revert_decay_epoch = [800, 2000, 3000, 4000]
45
+ revert_decay_gamma = [0.4, 0.5, 0.5, 0.4]
46
+ clip_grad = True
47
+ clip_grad_value = 4.2
48
+ switch_sgd = False
49
+ switch_sgd_lr = 1e-4
50
+ switch_sgd_epoch = -1
51
+
52
+ [hyperparameter]
53
+ batch_size = 1
54
+ dtype = float32
55
+ optimizer = adam
56
+ learning_rate = 0.001
57
+ lr_scheduler =
58
+ lr_milestones = []
59
+ momentum = 0.9
60
+ weight_decay = 0
61
+ criterion = MaskMSELoss
62
+ retain_edge_fea = True
63
+ lambda_eij = 0.0
64
+ lambda_ei = 0.1
65
+ lambda_etot = 0.0
66
+
67
+ [network]
68
+ atom_fea_len = 64
69
+ edge_fea_len = 128
70
+ gauss_stop = 6.0
71
+ num_l = 4
72
+ aggr = add
73
+ distance_expansion = GaussianBasis
74
+ if_exp = True
75
+ if_multiplelinear = False
76
+ if_edge_update = True
77
+ if_lcmp = True
78
+ normalization = LayerNorm
79
+ atom_update_net = PAINN
80
+ trainable_gaussians = False
81
+ type_affine = False
82
+
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/result.txt ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ====== CONFIG ======
2
+ [basic]
3
+ graph_dir=/home/apolyukhin/scripts/ml/diamond-qe/deeph-data/graph
4
+ save_dir=/home/apolyukhin/scripts/ml/diamond-qe/pristine-222/reconstruction/aohamiltonian/pred_ham_std
5
+ raw_dir=/home/apolyukhin/scripts/ml/diamond-qe/deeph-data/preprocess
6
+ dataset_name=diamond_qe
7
+ only_get_graph=False
8
+ interface=h5
9
+ target=hamiltonian
10
+ disable_cuda=True
11
+ device=cpu
12
+ num_threads=-1
13
+ save_to_time_folder=False
14
+ save_csv=True
15
+ tb_writer=False
16
+ seed=42
17
+ multiprocessing=0
18
+ orbital=[{"6 6": [0, 0]}, {"6 6": [0, 1]}, {"6 6": [0, 2]}, {"6 6": [0, 3]}, {"6 6": [0, 4]}, {"6 6": [0, 5]}, {"6 6": [0, 6]}, {"6 6": [0, 7]}, {"6 6": [0, 8]}, {"6 6": [0, 9]}, {"6 6": [0, 10]}, {"6 6": [0, 11]}, {"6 6": [0, 12]}, {"6 6": [1, 0]}, {"6 6": [1, 1]}, {"6 6": [1, 2]}, {"6 6": [1, 3]}, {"6 6": [1, 4]}, {"6 6": [1, 5]}, {"6 6": [1, 6]}, {"6 6": [1, 7]}, {"6 6": [1, 8]}, {"6 6": [1, 9]}, {"6 6": [1, 10]}, {"6 6": [1, 11]}, {"6 6": [1, 12]}, {"6 6": [2, 0]}, {"6 6": [2, 1]}, {"6 6": [2, 2]}, {"6 6": [2, 3]}, {"6 6": [2, 4]}, {"6 6": [2, 5]}, {"6 6": [2, 6]}, {"6 6": [2, 7]}, {"6 6": [2, 8]}, {"6 6": [2, 9]}, {"6 6": [2, 10]}, {"6 6": [2, 11]}, {"6 6": [2, 12]}, {"6 6": [3, 0]}, {"6 6": [3, 1]}, {"6 6": [3, 2]}, {"6 6": [3, 3]}, {"6 6": [3, 4]}, {"6 6": [3, 5]}, {"6 6": [3, 6]}, {"6 6": [3, 7]}, {"6 6": [3, 8]}, {"6 6": [3, 9]}, {"6 6": [3, 10]}, {"6 6": [3, 11]}, {"6 6": [3, 12]}, {"6 6": [4, 0]}, {"6 6": [4, 1]}, {"6 6": [4, 2]}, {"6 6": [4, 3]}, {"6 6": [4, 4]}, {"6 6": [4, 5]}, {"6 6": [4, 6]}, {"6 6": [4, 7]}, {"6 6": [4, 8]}, {"6 6": [4, 9]}, {"6 6": [4, 10]}, {"6 6": [4, 11]}, {"6 6": [4, 12]}, {"6 6": [5, 0]}, {"6 6": [5, 1]}, {"6 6": [5, 2]}, {"6 6": [5, 3]}, {"6 6": [5, 4]}, {"6 6": [5, 5]}, {"6 6": [5, 6]}, {"6 6": [5, 7]}, {"6 6": [5, 8]}, {"6 6": [5, 9]}, {"6 6": [5, 10]}, {"6 6": [5, 11]}, {"6 6": [5, 12]}, {"6 6": [6, 0]}, {"6 6": [6, 1]}, {"6 6": [6, 2]}, {"6 6": [6, 3]}, {"6 6": [6, 4]}, {"6 6": [6, 5]}, {"6 6": [6, 6]}, {"6 6": [6, 7]}, {"6 6": [6, 8]}, {"6 6": [6, 9]}, {"6 6": [6, 10]}, {"6 6": [6, 11]}, {"6 6": [6, 12]}, {"6 6": [7, 0]}, {"6 6": [7, 1]}, {"6 6": [7, 2]}, {"6 6": [7, 3]}, {"6 6": [7, 4]}, {"6 6": [7, 5]}, {"6 6": [7, 6]}, {"6 6": [7, 7]}, {"6 6": [7, 8]}, {"6 6": [7, 9]}, {"6 6": [7, 10]}, {"6 6": [7, 11]}, {"6 6": [7, 12]}, {"6 6": [8, 0]}, {"6 6": [8, 1]}, {"6 6": [8, 2]}, {"6 6": [8, 3]}, {"6 6": [8, 4]}, {"6 6": [8, 5]}, {"6 6": [8, 6]}, {"6 6": [8, 7]}, {"6 6": [8, 8]}, {"6 6": [8, 9]}, {"6 6": [8, 10]}, {"6 6": 
[8, 11]}, {"6 6": [8, 12]}, {"6 6": [9, 0]}, {"6 6": [9, 1]}, {"6 6": [9, 2]}, {"6 6": [9, 3]}, {"6 6": [9, 4]}, {"6 6": [9, 5]}, {"6 6": [9, 6]}, {"6 6": [9, 7]}, {"6 6": [9, 8]}, {"6 6": [9, 9]}, {"6 6": [9, 10]}, {"6 6": [9, 11]}, {"6 6": [9, 12]}, {"6 6": [10, 0]}, {"6 6": [10, 1]}, {"6 6": [10, 2]}, {"6 6": [10, 3]}, {"6 6": [10, 4]}, {"6 6": [10, 5]}, {"6 6": [10, 6]}, {"6 6": [10, 7]}, {"6 6": [10, 8]}, {"6 6": [10, 9]}, {"6 6": [10, 10]}, {"6 6": [10, 11]}, {"6 6": [10, 12]}, {"6 6": [11, 0]}, {"6 6": [11, 1]}, {"6 6": [11, 2]}, {"6 6": [11, 3]}, {"6 6": [11, 4]}, {"6 6": [11, 5]}, {"6 6": [11, 6]}, {"6 6": [11, 7]}, {"6 6": [11, 8]}, {"6 6": [11, 9]}, {"6 6": [11, 10]}, {"6 6": [11, 11]}, {"6 6": [11, 12]}, {"6 6": [12, 0]}, {"6 6": [12, 1]}, {"6 6": [12, 2]}, {"6 6": [12, 3]}, {"6 6": [12, 4]}, {"6 6": [12, 5]}, {"6 6": [12, 6]}, {"6 6": [12, 7]}, {"6 6": [12, 8]}, {"6 6": [12, 9]}, {"6 6": [12, 10]}, {"6 6": [12, 11]}, {"6 6": [12, 12]}]
19
+ o_component=H
20
+ energy_component=summation
21
+ max_element=-1
22
+ statistics=False
23
+ normalizer=False
24
+ boxcox=False
25
+
26
+ [graph]
27
+ radius=-1.0
28
+ max_num_nbr=0
29
+ create_from_dft=True
30
+ if_lcmp_graph=True
31
+ separate_onsite=False
32
+ new_sp=False
33
+
34
+ [train]
35
+ epochs=5000
36
+ pretrained=
37
+ resume=
38
+ train_ratio=0.6
39
+ val_ratio=0.2
40
+ test_ratio=0.2
41
+ early_stopping_loss=0.0
42
+ early_stopping_loss_epoch=[0.000000, 500]
43
+ revert_then_decay=True
44
+ revert_threshold=30
45
+ revert_decay_epoch=[800, 2000, 3000, 4000]
46
+ revert_decay_gamma=[0.4, 0.5, 0.5, 0.4]
47
+ clip_grad=True
48
+ clip_grad_value=4.2
49
+ switch_sgd=False
50
+ switch_sgd_lr=1e-4
51
+ switch_sgd_epoch=-1
52
+
53
+ [hyperparameter]
54
+ batch_size=1
55
+ dtype=float32
56
+ optimizer=adam
57
+ learning_rate=0.001
58
+ lr_scheduler=
59
+ lr_milestones=[]
60
+ momentum=0.9
61
+ weight_decay=0
62
+ criterion=MaskMSELoss
63
+ retain_edge_fea=True
64
+ lambda_eij=0.0
65
+ lambda_ei=0.1
66
+ lambda_etot=0.0
67
+
68
+ [network]
69
+ atom_fea_len=64
70
+ edge_fea_len=128
71
+ gauss_stop=6.0
72
+ num_l=4
73
+ aggr=add
74
+ distance_expansion=GaussianBasis
75
+ if_exp=True
76
+ if_multiplelinear=False
77
+ if_edge_update=True
78
+ if_lcmp=True
79
+ normalization=LayerNorm
80
+ atom_update_net=PAINN
81
+ trainable_gaussians=False
82
+ type_affine=False
83
+
84
+ => load best checkpoint (epoch 1070)
85
+ => Atomic types: [6], spinful: False, the number of atomic types: 1.
86
+ Save processed graph to /home/apolyukhin/scripts/ml/diamond-qe/pristine-222/reconstruction/aohamiltonian/graph.pkl, cost 1.6528115272521973 seconds
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from .data import HData
2
+ from .model import HGNN, ExpBernsteinBasis
3
+ from .utils import print_args, Logger, MaskMSELoss, MaskMAELoss, write_ham_npz, write_ham, write_ham_h5, get_config, \
4
+ get_inference_config, get_preprocess_config
5
+ from .graph import Collater, collate_fn, get_graph, load_orbital_types
6
+ from .kernel import DeepHKernel
7
+ from .preprocess import get_rc, OijLoad, GetEEiEij, abacus_parse, siesta_parse
8
+ from .rotate import get_rh, rotate_back, Rotate, dtype_dict
9
+
10
+ __version__ = "0.2.2"
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (925 Bytes). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/data.cpython-312.pyc ADDED
Binary file (12.2 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/graph.cpython-312.pyc ADDED
Binary file (71.1 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/kernel.cpython-312.pyc ADDED
Binary file (61.3 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/model.cpython-312.pyc ADDED
Binary file (38.4 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/rotate.cpython-312.pyc ADDED
Binary file (18.7 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/__pycache__/utils.cpython-312.pyc ADDED
Binary file (13.3 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/data.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ import os
3
+ import time
4
+ import tqdm
5
+
6
+ from pymatgen.core.structure import Structure
7
+ import numpy as np
8
+ import torch
9
+ from torch_geometric.data import InMemoryDataset
10
+ from pathos.multiprocessing import ProcessingPool as Pool
11
+
12
+ from .graph import get_graph
13
+
14
+
15
+ class HData(InMemoryDataset):
16
+ def __init__(self, raw_data_dir: str, graph_dir: str, interface: str, target: str,
17
+ dataset_name: str, multiprocessing: int, radius, max_num_nbr,
18
+ num_l, max_element, create_from_DFT, if_lcmp_graph, separate_onsite, new_sp,
19
+ default_dtype_torch, nums: int = None, transform=None, pre_transform=None, pre_filter=None):
20
+ """
21
+ when interface == 'h5',
22
+ raw_data_dir
23
+ ├── 00
24
+ │ ├──rh.h5 / rdm.h5
25
+ │ ├──rc.h5
26
+ │ ├──element.dat
27
+ │ ├──orbital_types.dat
28
+ │ ├──site_positions.dat
29
+ │ ├──lat.dat
30
+ │ └──info.json
31
+ ├── 01
32
+ │ ├──rh.h5 / rdm.h5
33
+ │ ├──rc.h5
34
+ │ ├──element.dat
35
+ │ ├──orbital_types.dat
36
+ │ ├──site_positions.dat
37
+ │ ├──lat.dat
38
+ │ └──info.json
39
+ ├── 02
40
+ │ ├──rh.h5 / rdm.h5
41
+ │ ├──rc.h5
42
+ │ ├──element.dat
43
+ │ ├──orbital_types.dat
44
+ │ ├──site_positions.dat
45
+ │ ├──lat.dat
46
+ │ └──info.json
47
+ ├── ...
48
+ """
49
+ self.raw_data_dir = raw_data_dir
50
+ assert dataset_name.find('-') == -1, '"-" can not be included in the dataset name'
51
+ if create_from_DFT:
52
+ way_create_graph = 'FromDFT'
53
+ else:
54
+ way_create_graph = f'{radius}r{max_num_nbr}mn'
55
+ if if_lcmp_graph:
56
+ lcmp_str = f'{num_l}l'
57
+ else:
58
+ lcmp_str = 'WithoutLCMP'
59
+ if separate_onsite is True:
60
+ onsite_str = '-SeparateOnsite'
61
+ else:
62
+ onsite_str = ''
63
+ if new_sp:
64
+ new_sp_str = '-NewSP'
65
+ else:
66
+ new_sp_str = ''
67
+ if target == 'hamiltonian':
68
+ title = 'HGraph'
69
+ else:
70
+ raise ValueError('Unknown prediction target: {}'.format(target))
71
+ graph_file_name = f'{title}-{interface}-{dataset_name}-{lcmp_str}-{way_create_graph}{onsite_str}{new_sp_str}.pkl'
72
+ self.data_file = os.path.join(graph_dir, graph_file_name)
73
+ os.makedirs(graph_dir, exist_ok=True)
74
+ self.data, self.slices = None, None
75
+ self.interface = interface
76
+ self.target = target
77
+ self.dataset_name = dataset_name
78
+ self.multiprocessing = multiprocessing
79
+ self.radius = radius
80
+ self.max_num_nbr = max_num_nbr
81
+ self.num_l = num_l
82
+ self.create_from_DFT = create_from_DFT
83
+ self.if_lcmp_graph = if_lcmp_graph
84
+ self.separate_onsite = separate_onsite
85
+ self.new_sp = new_sp
86
+ self.default_dtype_torch = default_dtype_torch
87
+
88
+ self.nums = nums
89
+ self.transform = transform
90
+ self.pre_transform = pre_transform
91
+ self.pre_filter = pre_filter
92
+ self.__indices__ = None
93
+ self.__data_list__ = None
94
+ self._indices = None
95
+ self._data_list = None
96
+
97
+ print(f'Graph data file: {graph_file_name}')
98
+ if os.path.exists(self.data_file):
99
+ print('Use existing graph data file')
100
+ else:
101
+ print('Process new data file......')
102
+ self.process()
103
+ begin = time.time()
104
+ try:
105
+ loaded_data = torch.load(self.data_file)
106
+ except AttributeError:
107
+ raise RuntimeError('Error in loading graph data file, try to delete it and generate the graph file with the current version of PyG')
108
+ if len(loaded_data) == 2:
109
+ warnings.warn('You are using the graph data file with an old version')
110
+ self.data, self.slices = loaded_data
111
+ self.info = {
112
+ "spinful": False,
113
+ "index_to_Z": torch.arange(max_element + 1),
114
+ "Z_to_index": torch.arange(max_element + 1),
115
+ }
116
+ elif len(loaded_data) == 3:
117
+ self.data, self.slices, tmp = loaded_data
118
+ if isinstance(tmp, dict):
119
+ self.info = tmp
120
+ print(f"Atomic types: {self.info['index_to_Z'].tolist()}")
121
+ else:
122
+ warnings.warn('You are using an old version of the graph data file')
123
+ self.info = {
124
+ "spinful": tmp,
125
+ "index_to_Z": torch.arange(max_element + 1),
126
+ "Z_to_index": torch.arange(max_element + 1),
127
+ }
128
+ print(f'Finish loading the processed {len(self)} structures (spinful: {self.info["spinful"]}, '
129
+ f'the number of atomic types: {len(self.info["index_to_Z"])}), cost {time.time() - begin:.0f} seconds')
130
+
131
+ def process_worker(self, folder, **kwargs):
132
+ stru_id = os.path.split(folder)[-1]
133
+
134
+ structure = Structure(np.loadtxt(os.path.join(folder, 'lat.dat')).T,
135
+ np.loadtxt(os.path.join(folder, 'element.dat')),
136
+ np.loadtxt(os.path.join(folder, 'site_positions.dat')).T,
137
+ coords_are_cartesian=True,
138
+ to_unit_cell=False)
139
+
140
+ cart_coords = torch.tensor(structure.cart_coords, dtype=self.default_dtype_torch)
141
+ frac_coords = torch.tensor(structure.frac_coords, dtype=self.default_dtype_torch)
142
+ numbers = torch.tensor(structure.atomic_numbers)
143
+ structure.lattice.matrix.setflags(write=True)
144
+ lattice = torch.tensor(structure.lattice.matrix, dtype=self.default_dtype_torch)
145
+ if self.target == 'E_ij':
146
+ huge_structure = True
147
+ else:
148
+ huge_structure = False
149
+ return get_graph(cart_coords, frac_coords, numbers, stru_id, r=self.radius, max_num_nbr=self.max_num_nbr,
150
+ numerical_tol=1e-8, lattice=lattice, default_dtype_torch=self.default_dtype_torch,
151
+ tb_folder=folder, interface=self.interface, num_l=self.num_l,
152
+ create_from_DFT=self.create_from_DFT, if_lcmp_graph=self.if_lcmp_graph,
153
+ separate_onsite=self.separate_onsite,
154
+ target=self.target, huge_structure=huge_structure, if_new_sp=self.new_sp, **kwargs)
155
+
156
+ def process(self):
157
+ begin = time.time()
158
+ folder_list = []
159
+ for root, dirs, files in os.walk(self.raw_data_dir):
160
+ if (self.interface == 'h5' and 'rc.h5' in files) or (
161
+ self.interface == 'npz' and 'rc.npz' in files):
162
+ folder_list.append(root)
163
+ folder_list = sorted(folder_list)
164
+ folder_list = folder_list[: self.nums]
165
+ if self.dataset_name == 'graphene_450':
166
+ folder_list = folder_list[500:5000:10]
167
+ if self.dataset_name == 'graphene_1500':
168
+ folder_list = folder_list[500:5000:3]
169
+ if self.dataset_name == 'bp_bilayer':
170
+ folder_list = folder_list[:600]
171
+ assert len(folder_list) != 0, "Can not find any structure"
172
+ print('Found %d structures, have cost %d seconds' % (len(folder_list), time.time() - begin))
173
+
174
+ if self.multiprocessing == 0:
175
+ print(f'Use multiprocessing (nodes = num_processors x num_threads = 1 x {torch.get_num_threads()})')
176
+ data_list = [self.process_worker(folder) for folder in tqdm.tqdm(folder_list)]
177
+ else:
178
+ pool_dict = {} if self.multiprocessing < 0 else {'nodes': self.multiprocessing}
179
+ # BS (2023.06.06):
180
+ # The keyword "num_threads" in kernel.py can be used to set the torch threads.
181
+ # The multiprocessing in the "process_worker" is in contradiction with the num_threads utilized in torch.
182
+ # To avoid this conflict, I limit the number of torch threads to one,
183
+ # and recover it when finishing the process_worker.
184
+ torch_num_threads = torch.get_num_threads()
185
+ torch.set_num_threads(1)
186
+
187
+ with Pool(**pool_dict) as pool:
188
+ nodes = pool.nodes
189
+ print(f'Use multiprocessing (nodes = num_processors x num_threads = {nodes} x {torch.get_num_threads()})')
190
+ data_list = list(tqdm.tqdm(pool.imap(self.process_worker, folder_list), total=len(folder_list)))
191
+ torch.set_num_threads(torch_num_threads)
192
+ print('Finish processing %d structures, have cost %d seconds' % (len(data_list), time.time() - begin))
193
+
194
+ if self.pre_filter is not None:
195
+ data_list = [d for d in data_list if self.pre_filter(d)]
196
+ if self.pre_transform is not None:
197
+ data_list = [self.pre_transform(d) for d in data_list]
198
+
199
+ index_to_Z, Z_to_index = self.element_statistics(data_list)
200
+ spinful = data_list[0].spinful
201
+ for d in data_list:
202
+ assert spinful == d.spinful
203
+
204
+ data, slices = self.collate(data_list)
205
+ torch.save((data, slices, dict(spinful=spinful, index_to_Z=index_to_Z, Z_to_index=Z_to_index)), self.data_file)
206
+ print('Finish saving %d structures to %s, have cost %d seconds' % (
207
+ len(data_list), self.data_file, time.time() - begin))
208
+
209
+ def element_statistics(self, data_list):
210
+ index_to_Z, inverse_indices = torch.unique(data_list[0].x, sorted=True, return_inverse=True)
211
+ Z_to_index = torch.full((100,), -1, dtype=torch.int64)
212
+ Z_to_index[index_to_Z] = torch.arange(len(index_to_Z))
213
+
214
+ for data in data_list:
215
+ data.x = Z_to_index[data.x]
216
+
217
+ return index_to_Z, Z_to_index
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/default.ini ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [basic]
2
+ graph_dir = /your/own/path
3
+ save_dir = /your/own/path
4
+ raw_dir = /your/own/path
5
+ dataset_name = your_own_name
6
+ only_get_graph = False
7
+ ;choices = ['h5', 'npz']
8
+ interface = h5
9
+ target = hamiltonian
10
+ disable_cuda = False
11
+ device = cuda:0
12
+ ;-1 for cpu_count(logical=False) // torch.cuda.device_count()
13
+ num_threads = -1
14
+ save_to_time_folder = True
15
+ save_csv = False
16
+ tb_writer = True
17
+ seed = 42
18
+ multiprocessing = 0
19
+ orbital = [{"6 6": [0, 0]}, {"6 6": [0, 1]}, {"6 6": [0, 2]}, {"6 6": [0, 3]}, {"6 6": [0, 4]}, {"6 6": [0, 5]}, {"6 6": [0, 6]}, {"6 6": [0, 7]}, {"6 6": [0, 8]}, {"6 6": [0, 9]}, {"6 6": [0, 10]}, {"6 6": [0, 11]}, {"6 6": [0, 12]}, {"6 6": [1, 0]}, {"6 6": [1, 1]}, {"6 6": [1, 2]}, {"6 6": [1, 3]}, {"6 6": [1, 4]}, {"6 6": [1, 5]}, {"6 6": [1, 6]}, {"6 6": [1, 7]}, {"6 6": [1, 8]}, {"6 6": [1, 9]}, {"6 6": [1, 10]}, {"6 6": [1, 11]}, {"6 6": [1, 12]}, {"6 6": [2, 0]}, {"6 6": [2, 1]}, {"6 6": [2, 2]}, {"6 6": [2, 3]}, {"6 6": [2, 4]}, {"6 6": [2, 5]}, {"6 6": [2, 6]}, {"6 6": [2, 7]}, {"6 6": [2, 8]}, {"6 6": [2, 9]}, {"6 6": [2, 10]}, {"6 6": [2, 11]}, {"6 6": [2, 12]}, {"6 6": [3, 0]}, {"6 6": [3, 1]}, {"6 6": [3, 2]}, {"6 6": [3, 3]}, {"6 6": [3, 4]}, {"6 6": [3, 5]}, {"6 6": [3, 6]}, {"6 6": [3, 7]}, {"6 6": [3, 8]}, {"6 6": [3, 9]}, {"6 6": [3, 10]}, {"6 6": [3, 11]}, {"6 6": [3, 12]}, {"6 6": [4, 0]}, {"6 6": [4, 1]}, {"6 6": [4, 2]}, {"6 6": [4, 3]}, {"6 6": [4, 4]}, {"6 6": [4, 5]}, {"6 6": [4, 6]}, {"6 6": [4, 7]}, {"6 6": [4, 8]}, {"6 6": [4, 9]}, {"6 6": [4, 10]}, {"6 6": [4, 11]}, {"6 6": [4, 12]}, {"6 6": [5, 0]}, {"6 6": [5, 1]}, {"6 6": [5, 2]}, {"6 6": [5, 3]}, {"6 6": [5, 4]}, {"6 6": [5, 5]}, {"6 6": [5, 6]}, {"6 6": [5, 7]}, {"6 6": [5, 8]}, {"6 6": [5, 9]}, {"6 6": [5, 10]}, {"6 6": [5, 11]}, {"6 6": [5, 12]}, {"6 6": [6, 0]}, {"6 6": [6, 1]}, {"6 6": [6, 2]}, {"6 6": [6, 3]}, {"6 6": [6, 4]}, {"6 6": [6, 5]}, {"6 6": [6, 6]}, {"6 6": [6, 7]}, {"6 6": [6, 8]}, {"6 6": [6, 9]}, {"6 6": [6, 10]}, {"6 6": [6, 11]}, {"6 6": [6, 12]}, {"6 6": [7, 0]}, {"6 6": [7, 1]}, {"6 6": [7, 2]}, {"6 6": [7, 3]}, {"6 6": [7, 4]}, {"6 6": [7, 5]}, {"6 6": [7, 6]}, {"6 6": [7, 7]}, {"6 6": [7, 8]}, {"6 6": [7, 9]}, {"6 6": [7, 10]}, {"6 6": [7, 11]}, {"6 6": [7, 12]}, {"6 6": [8, 0]}, {"6 6": [8, 1]}, {"6 6": [8, 2]}, {"6 6": [8, 3]}, {"6 6": [8, 4]}, {"6 6": [8, 5]}, {"6 6": [8, 6]}, {"6 6": [8, 7]}, {"6 6": [8, 8]}, {"6 6": [8, 9]}, {"6 6": [8, 10]}, {"6 
6": [8, 11]}, {"6 6": [8, 12]}, {"6 6": [9, 0]}, {"6 6": [9, 1]}, {"6 6": [9, 2]}, {"6 6": [9, 3]}, {"6 6": [9, 4]}, {"6 6": [9, 5]}, {"6 6": [9, 6]}, {"6 6": [9, 7]}, {"6 6": [9, 8]}, {"6 6": [9, 9]}, {"6 6": [9, 10]}, {"6 6": [9, 11]}, {"6 6": [9, 12]}, {"6 6": [10, 0]}, {"6 6": [10, 1]}, {"6 6": [10, 2]}, {"6 6": [10, 3]}, {"6 6": [10, 4]}, {"6 6": [10, 5]}, {"6 6": [10, 6]}, {"6 6": [10, 7]}, {"6 6": [10, 8]}, {"6 6": [10, 9]}, {"6 6": [10, 10]}, {"6 6": [10, 11]}, {"6 6": [10, 12]}, {"6 6": [11, 0]}, {"6 6": [11, 1]}, {"6 6": [11, 2]}, {"6 6": [11, 3]}, {"6 6": [11, 4]}, {"6 6": [11, 5]}, {"6 6": [11, 6]}, {"6 6": [11, 7]}, {"6 6": [11, 8]}, {"6 6": [11, 9]}, {"6 6": [11, 10]}, {"6 6": [11, 11]}, {"6 6": [11, 12]}, {"6 6": [12, 0]}, {"6 6": [12, 1]}, {"6 6": [12, 2]}, {"6 6": [12, 3]}, {"6 6": [12, 4]}, {"6 6": [12, 5]}, {"6 6": [12, 6]}, {"6 6": [12, 7]}, {"6 6": [12, 8]}, {"6 6": [12, 9]}, {"6 6": [12, 10]}, {"6 6": [12, 11]}, {"6 6": [12, 12]}]
20
+ O_component = H
21
+ energy_component = summation
22
+ max_element = -1
23
+ statistics = False
24
+ normalizer = False
25
+ boxcox = False
26
+
27
+ [graph]
28
+ radius = -1.0
29
+ max_num_nbr = 0
30
+ create_from_DFT = True
31
+ if_lcmp_graph = True
32
+ separate_onsite = False
33
+ new_sp = False
34
+
35
+ [train]
36
+ epochs = 4000
37
+ pretrained =
38
+ resume =
39
+ train_ratio = 0.6
40
+ val_ratio = 0.2
41
+ test_ratio = 0.2
42
+ early_stopping_loss = 0.0
43
+ early_stopping_loss_epoch = [0.000000, 500]
44
+ revert_then_decay = True
45
+ revert_threshold = 30
46
+ revert_decay_epoch = [500, 2000, 3000]
47
+ revert_decay_gamma = [0.4, 0.5, 0.5]
48
+ clip_grad = True
49
+ clip_grad_value = 4.2
50
+ switch_sgd = False
51
+ switch_sgd_lr = 1e-4
52
+ switch_sgd_epoch = -1
53
+
54
+ [hyperparameter]
55
+ batch_size = 3
56
+ dtype = float32
57
+ ;choices = ['sgd', 'sgdm', 'adam', 'lbfgs']
58
+ optimizer = adam
59
+ ;initial learning rate
60
+ learning_rate = 0.001
61
+ ;choices = ['', 'MultiStepLR', 'ReduceLROnPlateau', 'CyclicLR']
62
+ lr_scheduler =
63
+ lr_milestones = []
64
+ momentum = 0.9
65
+ weight_decay = 0
66
+ criterion = MaskMSELoss
67
+ retain_edge_fea = True
68
+ lambda_Eij = 0.0
69
+ lambda_Ei = 0.1
70
+ lambda_Etot = 0.0
71
+
72
+ [network]
73
+ atom_fea_len = 64
74
+ edge_fea_len = 128
75
+ gauss_stop = 6
76
+ ;The number of angular quantum numbers that spherical harmonic functions have
77
+ num_l = 5
78
+ aggr = add
79
+ distance_expansion = GaussianBasis
80
+ if_exp = True
81
+ if_MultipleLinear = False
82
+ if_edge_update = True
83
+ if_lcmp = True
84
+ normalization = LayerNorm
85
+ ;choices = ['CGConv', 'GAT', 'PAINN']
86
+ atom_update_net = CGConv
87
+ trainable_gaussians = False
88
+ type_affine = False
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_HermNet/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .rmnet import RBF, cosine_cutoff, ShiftedSoftplus, _eps
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_HermNet/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (263 Bytes). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_HermNet/__pycache__/rmnet.cpython-312.pyc ADDED
Binary file (4.67 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_HermNet/license.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ The code in this folder was obtained from "https://github.com/sakuraiiiii/HermNet"
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_HermNet/rmnet.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+
3
+ import torch
4
+ from torch import nn, Tensor
5
+ import numpy as np
6
+
7
+
8
_eps = 1e-3

# NOTE on `_eps`: it exists to avoid NaN gradients.  Expressions of the form
# r / ||r|| are differentiated during training, and simply replacing the
# denominator with (||r|| + _eps) still yields NaN at r = 0, since
# d/dx (x / (r + b)) = (r + b - x^2 / r) / (r + b)^2 with r = sqrt(x^2+y^2+z^2).
# The robust fix is to smooth the norm itself: r' = sqrt(x^2 + y^2 + z^2 + _eps),
# which remains rotationally invariant because r^2 is.


class RBF(nn.Module):
    r"""Radial basis features for interatomic vectors.

    A modified version of the DimeNet distance featurization, as used in
    PAiNN: sin(k * pi * ||x|| / rc) / ||x|| for k = 1..l, with the norm
    smoothed by `_eps` (see the module note above).

    Parameters
    ----------
    rc : float
        Cutoff radius.
    l : int
        Number of sine basis functions.
    """

    def __init__(self, rc: float, l: int):
        super(RBF, self).__init__()
        self.rc = rc
        self.l = l

    def forward(self, x: Tensor):
        # Basis orders 1..l on the same device as the input.
        orders = torch.arange(1, self.l + 1, dtype=torch.float32, device=x.device)
        # Smoothed vector norm, kept as a trailing singleton dim for broadcasting.
        smoothed_norm = torch.sqrt((x * x).sum(dim=-1) + _eps).unsqueeze(-1)
        phase = (math.pi / self.rc) * (smoothed_norm @ orders.unsqueeze(0))
        return torch.sin(phase) / smoothed_norm
47
+
48
+
49
class cosine_cutoff(nn.Module):
    r"""Smooth cosine cutoff envelope,
    https://aip.scitation.org/doi/pdf/10.1063/1.3553717.

    Parameters
    ----------
    rc : float
        Cutoff radius.
    """

    def __init__(self, rc: float):
        super(cosine_cutoff, self).__init__()
        self.rc = rc

    def forward(self, x: Tensor):
        # `_eps` keeps the norm strictly positive (see the module-level note).
        shifted_norm = torch.norm(x, dim=-1, keepdim=True) + _eps
        return 0.5 * (1 + torch.cos(math.pi * shifted_norm / self.rc))
64
+
65
class ShiftedSoftplus(nn.Module):
    r"""Shifted softplus activation.

    Applies, element-wise:

    .. math::
        \text{SSP}(x) = \frac{1}{\beta} \log(1 + \exp(\beta x)) - \log(\text{shift})

    Attributes
    ----------
    beta : int
        :math:`\beta` of the softplus. Default 1.
    shift : int
        Constant whose log is subtracted. Default 2.
    threshold : int
        Linearization threshold forwarded to :class:`torch.nn.Softplus`.
        Default 20.
    """

    def __init__(self, beta=1, shift=2, threshold=20):
        super(ShiftedSoftplus, self).__init__()
        self.shift = shift
        self.softplus = nn.Softplus(beta=beta, threshold=threshold)

    def forward(self, inputs):
        """Apply the shifted softplus to `inputs` (float32 tensor, any shape)
        and return a tensor of the same shape."""
        return self.softplus(inputs) - np.log(float(self.shift))
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .graph_norm import GraphNorm
2
+ from .diff_group_norm import DiffGroupNorm
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (263 Bytes). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__pycache__/diff_group_norm.cpython-312.pyc ADDED
Binary file (6.43 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__pycache__/graph_norm.cpython-312.pyc ADDED
Binary file (3.76 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/diff_group_norm.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import Tensor
3
+ from torch.nn import Linear, BatchNorm1d
4
+
5
+
6
class DiffGroupNorm(torch.nn.Module):
    r"""The differentiable group normalization layer from the `"Towards Deeper
    Graph Neural Networks with Differentiable Group Normalization"
    <https://arxiv.org/abs/2006.06972>`_ paper.

    Each node is softly assigned to one of :math:`G` clusters through a
    trainable linear map, :math:`\mathbf{S} = \text{softmax}(\mathbf{X}
    \mathbf{W})`, and normalization is performed group-wise:

    .. math::

        \mathbf{X}^{\prime} = \mathbf{X} + \lambda \sum_{i = 1}^G
        \text{BatchNorm}(\mathbf{S}[:, i] \odot \mathbf{X})

    Args:
        in_channels (int): Size of each input sample :math:`F`.
        groups (int): The number of groups :math:`G`.
        lamda (float, optional): Balancing factor :math:`\lambda` between the
            input and the normalized embeddings. (default: :obj:`0.01`)
        eps (float, optional): Denominator stabilizer. (default: :obj:`1e-5`)
        momentum (float, optional): Running-statistics momentum.
            (default: :obj:`0.1`)
        affine (bool, optional): Learn :math:`\gamma`/:math:`\beta`.
            (default: :obj:`True`)
        track_running_stats (bool, optional): Track running mean/variance.
            (default: :obj:`True`)
    """
    def __init__(self, in_channels, groups, lamda=0.01, eps=1e-5, momentum=0.1,
                 affine=True, track_running_stats=True):
        super(DiffGroupNorm, self).__init__()

        self.in_channels = in_channels
        self.groups = groups
        self.lamda = lamda

        # Soft cluster assignment followed by one BatchNorm over all groups.
        self.lin = Linear(in_channels, groups, bias=False)
        self.norm = BatchNorm1d(groups * in_channels, eps, momentum, affine,
                                track_running_stats)

        self.reset_parameters()

    def reset_parameters(self):
        self.lin.reset_parameters()
        self.norm.reset_parameters()

    def forward(self, x: Tensor) -> Tensor:
        """"""
        channels, groups = self.in_channels, self.groups

        assign = self.lin(x).softmax(dim=-1)              # [N, G]
        grouped = assign.unsqueeze(-1) * x.unsqueeze(-2)  # [N, G, F]
        # Normalize all groups in one BatchNorm pass, then sum them back.
        normed = self.norm(grouped.view(-1, groups * channels))
        aggregated = normed.view(-1, groups, channels).sum(dim=-2)  # [N, F]

        return x + self.lamda * aggregated

    @staticmethod
    def group_distance_ratio(x: Tensor, y: Tensor, eps: float = 1e-5) -> float:
        r"""Measures the ratio of inter-group distance over intra-group
        distance

        .. math::
            R_{\text{Group}} = \frac{\frac{1}{(C-1)^2} \sum_{i!=j}
            \frac{1}{|\mathbf{X}_i||\mathbf{X}_j|} \sum_{\mathbf{x}_{iv}
            \in \mathbf{X}_i } \sum_{\mathbf{x}_{jv^{\prime}} \in \mathbf{X}_j}
            {\| \mathbf{x}_{iv} - \mathbf{x}_{jv^{\prime}} \|}_2 }{
            \frac{1}{C} \sum_{i} \frac{1}{{|\mathbf{X}_i|}^2}
            \sum_{\mathbf{x}_{iv}, \mathbf{x}_{iv^{\prime}} \in \mathbf{X}_i }
            {\| \mathbf{x}_{iv} - \mathbf{x}_{iv^{\prime}} \|}_2 }

        where :math:`\mathbf{X}_i` is the set of nodes with class :math:`i`
        and :math:`C` the number of classes in :obj:`y`.
        """
        num_classes = int(y.max()) + 1

        # Mean distance from each class to all other classes.
        inter = 0.
        for c in range(num_classes):
            members = y == c
            pair_dist = torch.cdist(x[members].unsqueeze(0),
                                    x[~members].unsqueeze(0))
            inter += float(pair_dist.sum()) / pair_dist.numel()
        inter *= 1 / (num_classes - 1)**2

        # Mean distance within each class.
        intra = 0.
        for c in range(num_classes):
            members = y == c
            pair_dist = torch.cdist(x[members].unsqueeze(0),
                                    x[members].unsqueeze(0))
            intra += float(pair_dist.sum()) / pair_dist.numel()
        intra *= 1 / num_classes

        return inter / (intra + eps)

    def __repr__(self):
        return '{}({}, groups={})'.format(self.__class__.__name__,
                                          self.in_channels, self.groups)
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/graph_norm.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ import torch
4
+ from torch import Tensor
5
+ from torch_scatter import scatter_mean
6
+
7
+ from torch_geometric.nn.inits import zeros, ones
8
+
9
+
10
class GraphNorm(torch.nn.Module):
    r"""Applies graph normalization over individual graphs as described in the
    `"GraphNorm: A Principled Approach to Accelerating Graph Neural Network
    Training" <https://arxiv.org/abs/2009.03294>`_ paper

    .. math::
        \mathbf{x}^{\prime}_i = \frac{\mathbf{x} - \alpha \odot
        \textrm{E}[\mathbf{x}]}
        {\sqrt{\textrm{Var}[\mathbf{x} - \alpha \odot \textrm{E}[\mathbf{x}]]
        + \epsilon}} \odot \gamma + \beta

    where :math:`\alpha` denotes parameters that learn how much information
    to keep in the mean.

    Args:
        in_channels (int): Size of each input sample.
        eps (float, optional): A value added to the denominator for numerical
            stability. (default: :obj:`1e-5`)
    """
    def __init__(self, in_channels: int, eps: float = 1e-5):
        super(GraphNorm, self).__init__()

        self.in_channels = in_channels
        self.eps = eps

        # Per-channel gamma, beta and alpha of the formula above.
        self.weight = torch.nn.Parameter(torch.Tensor(in_channels))
        self.bias = torch.nn.Parameter(torch.Tensor(in_channels))
        self.mean_scale = torch.nn.Parameter(torch.Tensor(in_channels))

        self.reset_parameters()

    def reset_parameters(self):
        # torch_geometric in-place initializers: gamma = 1, beta = 0, alpha = 1.
        ones(self.weight)
        zeros(self.bias)
        ones(self.mean_scale)

    def forward(self, x: Tensor, batch: Optional[Tensor] = None) -> Tensor:
        """Normalize node features `x` per graph, where `batch` assigns each
        node to a graph index; with no `batch`, all nodes form one graph."""
        if batch is None:
            batch = x.new_zeros(x.size(0), dtype=torch.long)

        batch_size = int(batch.max()) + 1

        # Per-graph mean, broadcast back onto the nodes via `[batch]`.
        mean = scatter_mean(x, batch, dim=0, dim_size=batch_size)[batch]
        out = x - mean * self.mean_scale
        # Per-graph variance of the alpha-centered features.
        var = scatter_mean(out.pow(2), batch, dim=0, dim_size=batch_size)
        std = (var + self.eps).sqrt()[batch]
        return self.weight * out / std + self.bias

    def __repr__(self):
        return f'{self.__class__.__name__}({self.in_channels})'
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/license.txt ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ The code in this folder was obtained from "https://github.com/rusty1s/pytorch_geometric", which has the following license:
2
+
3
+
4
+ Copyright (c) 2020 Matthias Fey <matthias.fey@tu-dortmund.de>
5
+
6
+ Permission is hereby granted, free of charge, to any person obtaining a copy
7
+ of this software and associated documentation files (the "Software"), to deal
8
+ in the Software without restriction, including without limitation the rights
9
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
+ copies of the Software, and to permit persons to whom the Software is
11
+ furnished to do so, subject to the following conditions:
12
+
13
+ The above copyright notice and this permission notice shall be included in
14
+ all copies or substantial portions of the Software.
15
+
16
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22
+ THE SOFTWARE.
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .lattice import find_neighbors, _one_to_three, _compute_cube_index, _three_to_one
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (290 Bytes). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/__pycache__/lattice.cpython-312.pyc ADDED
Binary file (3.65 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/lattice.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import numpy as np
3
+
4
+
5
+ # The following internal methods are used in the get_points_in_sphere method.
6
+ def _compute_cube_index(coords: np.ndarray, global_min: float, radius: float
7
+ ) -> np.ndarray:
8
+ """
9
+ Compute the cube index from coordinates
10
+ Args:
11
+ coords: (nx3 array) atom coordinates
12
+ global_min: (float) lower boundary of coordinates
13
+ radius: (float) cutoff radius
14
+
15
+ Returns: (nx3 array) int indices
16
+
17
+ """
18
+ return np.array(np.floor((coords - global_min) / radius), dtype=int)
19
+
20
+ def _three_to_one(label3d: np.ndarray, ny: int, nz: int) -> np.ndarray:
21
+ """
22
+ The reverse of _one_to_three
23
+ """
24
+ return np.array(label3d[:, 0] * ny * nz +
25
+ label3d[:, 1] * nz + label3d[:, 2]).reshape((-1, 1))
26
+
27
+ def _one_to_three(label1d: np.ndarray, ny: int, nz: int) -> np.ndarray:
28
+ """
29
+ Convert a 1D index array to 3D index array
30
+
31
+ Args:
32
+ label1d: (array) 1D index array
33
+ ny: (int) number of cells in y direction
34
+ nz: (int) number of cells in z direction
35
+
36
+ Returns: (nx3) int array of index
37
+
38
+ """
39
+ last = np.mod(label1d, nz)
40
+ second = np.mod((label1d - last) / nz, ny)
41
+ first = (label1d - last - second * nz) / (ny * nz)
42
+ return np.concatenate([first, second, last], axis=1)
43
+
44
def find_neighbors(label: np.ndarray, nx: int, ny: int, nz: int):
    """Given cube indices, collect each cube's 27-cell neighborhood.

    Args:
        label: (array) (n,1) flat or (n,3) triple cube indices
        nx: (int) number of cells in x direction
        ny: (int) number of cells in y direction
        nz: (int) number of cells in z direction

    Returns: list of (k x 3) arrays, one per input cube, holding only the
        neighbor cells inside the [0, nx) x [0, ny) x [0, nz) grid
    """
    offsets = np.array(list(itertools.product(*([[-1, 0, 1]] * 3))), dtype=int)
    if np.shape(label)[1] == 1:
        label3d = _one_to_three(label, ny, nz)
    else:
        label3d = label
    candidates = label3d[:, None, :] - offsets[None, :, :]
    results = []
    # Filter out out-of-bound cells on both sides (the -1e-5 tolerates the
    # float indices produced by _one_to_three).
    for cells in candidates:
        keep = ((cells[:, 0] < nx) * (cells[:, 1] < ny) * (cells[:, 2] < nz)
                * np.all(cells > -1e-5, axis=1))
        results.append(cells[keep])
    return results
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/license.txt ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ The code in this folder was obtained from "https://github.com/materialsproject/pymatgen", which has the following license:
2
+
3
+
4
+ The MIT License (MIT)
5
+ Copyright (c) 2011-2012 MIT & The Regents of the University of California, through Lawrence Berkeley National Laboratory
6
+
7
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
8
+ this software and associated documentation files (the "Software"), to deal in
9
+ the Software without restriction, including without limitation the rights to
10
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
11
+ the Software, and to permit persons to whom the Software is furnished to do so,
12
+ subject to the following conditions:
13
+
14
+ The above copyright notice and this permission notice shall be included in all
15
+ copies or substantial portions of the Software.
16
+
17
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
19
+ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
20
+ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .acsf import GaussianBasis
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (207 Bytes). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/__pycache__/acsf.cpython-312.pyc ADDED
Binary file (2.45 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/acsf.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import nn
3
+
4
+
5
def gaussian_smearing(distances, offset, widths, centered=False):
    """Expand `distances` on a set of Gaussian basis functions.

    In the default mode each Gaussian is centered at an entry of `offset`
    with the matching entry of `widths`; in centered mode all Gaussians sit
    at zero and `offset` supplies the widths instead.
    """
    if not centered:
        # Width coefficient (overlap of one standard deviation).
        coeff = -0.5 / torch.pow(widths, 2)
        diff = distances[..., None] - offset
    else:
        # Centered Gaussians: offsets double as widths, nothing is subtracted.
        coeff = -0.5 / torch.pow(offset, 2)
        diff = distances[..., None]
    return torch.exp(coeff * torch.pow(diff, 2))


class GaussianBasis(nn.Module):
    """Expand scalar distances in a basis of equally spaced Gaussians."""

    def __init__(
        self, start=0.0, stop=5.0, n_gaussians=50, centered=False, trainable=False
    ):
        super(GaussianBasis, self).__init__()
        # Equally spaced centers; the spacing doubles as every width.
        centers = torch.linspace(start, stop, n_gaussians)
        widths = torch.FloatTensor((centers[1] - centers[0]) * torch.ones_like(centers))
        if trainable:
            self.width = nn.Parameter(widths)
            self.offsets = nn.Parameter(centers)
        else:
            self.register_buffer("width", widths)
            self.register_buffer("offsets", centers)
        self.centered = centered

    def forward(self, distances):
        """Compute smeared-gaussian distance values.

        Args:
            distances (torch.Tensor): interatomic distance values of
                (N_b x N_at x N_nbh) shape.

        Returns:
            torch.Tensor: layer output of (N_b x N_at x N_nbh x N_g) shape.
        """
        return gaussian_smearing(
            distances, self.offsets, self.width, centered=self.centered
        )
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/license.txt ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ The code in this folder was obtained from "https://github.com/atomistic-machine-learning/schnetpack", which has the following license:
2
+
3
+
4
+ COPYRIGHT
5
+
6
+ Copyright (c) 2018 Kristof Schütt, Michael Gastegger, Pan Kessel, Kim Nicoli
7
+
8
+ All other contributions:
9
+ Copyright (c) 2018, the respective contributors.
10
+ All rights reserved.
11
+
12
+ Each contributor holds copyright over their respective contributions.
13
+ The project versioning (Git) records all such contribution source information.
14
+
15
+ LICENSE
16
+
17
+ The MIT License
18
+
19
+ Permission is hereby granted, free of charge, to any person obtaining a copy
20
+ of this software and associated documentation files (the "Software"), to deal
21
+ in the Software without restriction, including without limitation the rights
22
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
23
+ copies of the Software, and to permit persons to whom the Software is
24
+ furnished to do so, subject to the following conditions:
25
+
26
+ The above copyright notice and this permission notice shall be included in all
27
+ copies or substantial portions of the Software.
28
+
29
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
34
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35
+ SOFTWARE.
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .representations import SphericalHarmonics
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (228 Bytes). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/__pycache__/representations.cpython-312.pyc ADDED
Binary file (8.14 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/license.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ The code in this folder was obtained from "https://github.com/mariogeiger/se3cnn/", which has the following license:
2
+
3
+
4
+ MIT License
5
+
6
+ Copyright (c) 2019 Mario Geiger
7
+
8
+ Permission is hereby granted, free of charge, to any person obtaining a copy
9
+ of this software and associated documentation files (the "Software"), to deal
10
+ in the Software without restriction, including without limitation the rights
11
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12
+ copies of the Software, and to permit persons to whom the Software is
13
+ furnished to do so, subject to the following conditions:
14
+
15
+ The above copyright notice and this permission notice shall be included in all
16
+ copies or substantial portions of the Software.
17
+
18
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24
+ SOFTWARE.
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/representations.py ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+
4
+
5
def semifactorial(x):
    """Compute the semifactorial function x!!.

    x!! = x * (x-2) * (x-4) *...

    Args:
        x: positive int
    Returns:
        float for x!!  (1.0 for x <= 1, including the x = -1 case reached
        by the Legendre recursion below)
    """
    y = 1.
    for n in range(x, 1, -2):
        y *= n
    return y


def pochhammer(x, k):
    """Compute the pochhammer symbol (x)_k.

    (x)_k = x * (x+1) * (x+2) *...* (x+k-1)

    Args:
        x: positive int
        k: non-negative int
    Returns:
        float for (x)_k; by convention (x)_0 == 1.
    """
    # (x)_0 is the empty product. Without this guard the loop below is
    # skipped and the function wrongly returns x itself for k == 0.
    if k == 0:
        return 1.
    xf = float(x)
    for n in range(x + 1, x + k):
        xf *= n
    return xf


def lpmv(l, m, x):
    """Associated Legendre function including Condon-Shortley phase.

    Args:
        m: int order
        l: int degree
        x: float argument tensor
    Returns:
        tensor of x-shape
    """
    m_abs = abs(m)
    if m_abs > l:
        return torch.zeros_like(x)

    # Compute P_m^m (closed form).
    yold = ((-1)**m_abs * semifactorial(2*m_abs-1)) * torch.pow(1-x*x, m_abs/2)

    # Compute P_{m+1}^m; if l == m the closed form is already the answer.
    if m_abs != l:
        y = x * (2*m_abs+1) * yold
    else:
        y = yold

    # Compute P_{l}^m from upward recursion in P_{l-1}^m and P_{l-2}^m.
    for i in range(m_abs+2, l+1):
        tmp = y
        # Inplace speedup
        y = ((2*i-1) / (i-m_abs)) * x * y
        y -= ((i+m_abs-1)/(i-m_abs)) * yold
        yold = tmp

    # Negative orders differ from positive ones by a factorial ratio.
    if m < 0:
        y *= ((-1)**m / pochhammer(l+m+1, -2*m))

    return y


def tesseral_harmonics(l, m, theta=0., phi=0.):
    """Tesseral spherical harmonic with Condon-Shortley phase.

    The Tesseral spherical harmonics are also known as the real spherical
    harmonics.

    Args:
        l: int for degree
        m: int for order, where -l <= m < l
        theta: collatitude or polar angle
        phi: longitude or azimuth
    Returns:
        tensor of shape theta
    """
    assert abs(m) <= l, "absolute value of order m must be <= degree l"

    N = np.sqrt((2*l+1) / (4*np.pi))
    leg = lpmv(l, abs(m), torch.cos(theta))
    if m == 0:
        return N*leg
    elif m > 0:
        Y = torch.cos(m*phi) * leg
    else:
        Y = torch.sin(abs(m)*phi) * leg
    # Extra normalization for the m != 0 harmonics.
    N *= np.sqrt(2. / pochhammer(l-abs(m)+1, 2*abs(m)))
    Y *= N
    return Y
99
+
100
class SphericalHarmonics(object):
    """Memoized evaluator for real (tesseral) spherical harmonics.

    Associated Legendre values are cached in ``self.leg`` keyed by ``(l, m)``
    so that the upward recursion reuses lower-degree results across calls.
    The cache is only valid for a fixed argument tensor; ``get`` clears it
    by default via ``refresh=True``.
    """
    def __init__(self):
        # cache: (l, m) -> tensor of P_l^m values for the current argument
        self.leg = {}

    def clear(self):
        """Drop all memoized Legendre values."""
        self.leg = {}

    def negative_lpmv(self, l, m, y):
        """Compute negative order coefficients"""
        # For m < 0 the value differs from P_l^{|m|} by a factorial ratio.
        if m < 0:
            y *= ((-1)**m / pochhammer(l+m+1, -2*m))
        return y

    def lpmv(self, l, m, x):
        """Associated Legendre function including Condon-Shortley phase.

        Args:
            m: int order
            l: int degree
            x: float argument tensor
        Returns:
            tensor of x-shape (or None when |m| > l)
        """
        # Check memoized versions
        m_abs = abs(m)
        if (l,m) in self.leg:
            return self.leg[(l,m)]
        elif m_abs > l:
            return None
        elif l == 0:
            # P_0^0 is identically 1.
            self.leg[(l,m)] = torch.ones_like(x)
            return self.leg[(l,m)]

        # Check if on boundary else recurse solution down to boundary
        if m_abs == l:
            # Compute P_m^m (closed form)
            y = (-1)**m_abs * semifactorial(2*m_abs-1)
            y *= torch.pow(1-x*x, m_abs/2)
            self.leg[(l,m)] = self.negative_lpmv(l, m, y)
            return self.leg[(l,m)]
        else:
            # Recursively precompute lower degree harmonics
            self.lpmv(l-1, m, x)

        # Compute P_{l}^m from recursion in P_{l-1}^m and P_{l-2}^m
        # Inplace speedup
        y = ((2*l-1) / (l-m_abs)) * x * self.lpmv(l-1, m_abs, x)
        if l - m_abs > 1:
            # The (l-2, |m|) entry exists thanks to the recursive call above.
            y -= ((l+m_abs-1)/(l-m_abs)) * self.leg[(l-2, m_abs)]
        #self.leg[(l, m_abs)] = y

        if m < 0:
            y = self.negative_lpmv(l, m, y)
        self.leg[(l,m)] = y

        return self.leg[(l,m)]

    def get_element(self, l, m, theta, phi):
        """Tesseral spherical harmonic with Condon-Shortley phase.

        The Tesseral spherical harmonics are also known as the real spherical
        harmonics.

        Args:
            l: int for degree
            m: int for order, where -l <= m < l
            theta: collatitude or polar angle
            phi: longitude or azimuth
        Returns:
            tensor of shape theta
        """
        assert abs(m) <= l, "absolute value of order m must be <= degree l"

        N = np.sqrt((2*l+1) / (4*np.pi))
        leg = self.lpmv(l, abs(m), torch.cos(theta))
        if m == 0:
            return N*leg
        elif m > 0:
            Y = torch.cos(m*phi) * leg
        else:
            Y = torch.sin(abs(m)*phi) * leg
        # Extra normalization for the m != 0 harmonics.
        N *= np.sqrt(2. / pochhammer(l-abs(m)+1, 2*abs(m)))
        Y *= N
        return Y

    def get(self, l, theta, phi, refresh=True):
        """Tesseral harmonic with Condon-Shortley phase.

        The Tesseral spherical harmonics are also known as the real spherical
        harmonics.

        Args:
            l: int for degree
            theta: collatitude or polar angle
            phi: longitude or azimuth
            refresh: clear the Legendre cache first (required whenever
                theta changes between calls)
        Returns:
            tensor of shape [*theta.shape, 2*l+1]
        """
        results = []
        if refresh:
            self.clear()
        # Stack all orders m = -l .. l along a new trailing dimension.
        for m in range(-l, l+1):
            results.append(self.get_element(l, m, theta, phi))
        return torch.stack(results, -1)
+
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/graph.py ADDED
@@ -0,0 +1,934 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import itertools
3
+ import os
4
+ import json
5
+ import warnings
6
+ import math
7
+
8
+ import torch
9
+ import torch_geometric
10
+ from torch_geometric.data import Data, Batch
11
+ import numpy as np
12
+ import h5py
13
+
14
+ from .model import get_spherical_from_cartesian, SphericalHarmonics
15
+ from .from_pymatgen import find_neighbors, _one_to_three, _compute_cube_index, _three_to_one
16
+
17
+
18
+ """
19
+ The function _spherical_harmonics below comes from "https://github.com/e3nn/e3nn", which has the MIT License below
20
+
21
+ ---------------------------------------------------------------------------
22
+ MIT License
23
+
24
+ Euclidean neural networks (e3nn) Copyright (c) 2020, The Regents of the
25
+ University of California, through Lawrence Berkeley National Laboratory
26
+ (subject to receipt of any required approvals from the U.S. Dept. of Energy),
27
+ Ecole Polytechnique Federale de Lausanne (EPFL), Free University of Berlin
28
+ and Kostiantyn Lapchevskyi. All rights reserved.
29
+
30
+ Permission is hereby granted, free of charge, to any person obtaining a copy
31
+ of this software and associated documentation files (the "Software"), to deal
32
+ in the Software without restriction, including without limitation the rights to use,
33
+ copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
34
+ Software, and to permit persons to whom the Software is furnished to do so,
35
+ subject to the following conditions:
36
+
37
+ The above copyright notice and this permission notice shall be included in all
38
+ copies or substantial portions of the Software.
39
+
40
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
43
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
44
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
45
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
46
+ SOFTWARE.
47
+ """
48
def _spherical_harmonics(lmax: int, x: torch.Tensor, y: torch.Tensor, z: torch.Tensor) -> torch.Tensor:
    """Real spherical harmonics up to degree ``lmax`` (0..11), e3nn convention.

    Hand-unrolled recursion generated by/copied from e3nn (see license above).
    Each degree l is built from degree l-1 via fixed numeric coefficients, and
    the channels for all degrees 0..lmax are stacked into the last dimension.

    Args:
        lmax: maximum degree; values above 11 fall through to the lmax == 11 stack.
        x, y, z: Cartesian direction components, same shape.
            # assumes (x, y, z) lie on the unit sphere — TODO confirm caller normalizes
    Returns:
        tensor of shape [*x.shape, (lmax+1)**2]; within each degree the channel
        order follows e3nn's component ordering.
    """
    # l = 0: constant.
    sh_0_0 = torch.ones_like(x)
    if lmax == 0:
        return torch.stack([
            sh_0_0,
        ], dim=-1)

    # l = 1: the components themselves (e3nn y-up ordering).
    sh_1_0 = x
    sh_1_1 = y
    sh_1_2 = z
    if lmax == 1:
        return torch.stack([
            sh_0_0,
            sh_1_0, sh_1_1, sh_1_2
        ], dim=-1)

    # l = 2.
    sh_2_0 = math.sqrt(3.0) * x * z
    sh_2_1 = math.sqrt(3.0) * x * y
    y2 = y.pow(2)
    x2z2 = x.pow(2) + z.pow(2)
    sh_2_2 = y2 - 0.5 * x2z2
    sh_2_3 = math.sqrt(3.0) * y * z
    sh_2_4 = math.sqrt(3.0) / 2.0 * (z.pow(2) - x.pow(2))

    if lmax == 2:
        return torch.stack([
            sh_0_0,
            sh_1_0, sh_1_1, sh_1_2,
            sh_2_0, sh_2_1, sh_2_2, sh_2_3, sh_2_4
        ], dim=-1)

    # l = 3, from the l = 2 components.
    sh_3_0 = math.sqrt(5.0 / 6.0) * (sh_2_0 * z + sh_2_4 * x)
    sh_3_1 = math.sqrt(5.0) * sh_2_0 * y
    sh_3_2 = math.sqrt(3.0 / 8.0) * (4.0 * y2 - x2z2) * x
    sh_3_3 = 0.5 * y * (2.0 * y2 - 3.0 * x2z2)
    sh_3_4 = math.sqrt(3.0 / 8.0) * z * (4.0 * y2 - x2z2)
    sh_3_5 = math.sqrt(5.0) * sh_2_4 * y
    sh_3_6 = math.sqrt(5.0 / 6.0) * (sh_2_4 * z - sh_2_0 * x)

    if lmax == 3:
        return torch.stack([
            sh_0_0,
            sh_1_0, sh_1_1, sh_1_2,
            sh_2_0, sh_2_1, sh_2_2, sh_2_3, sh_2_4,
            sh_3_0, sh_3_1, sh_3_2, sh_3_3, sh_3_4, sh_3_5, sh_3_6
        ], dim=-1)

    # l = 4 (numeric coefficients are machine-generated; do not hand-edit).
    sh_4_0 = 0.935414346693485*sh_3_0*z + 0.935414346693485*sh_3_6*x
    sh_4_1 = 0.661437827766148*sh_3_0*y + 0.810092587300982*sh_3_1*z + 0.810092587300983*sh_3_5*x
    sh_4_2 = -0.176776695296637*sh_3_0*z + 0.866025403784439*sh_3_1*y + 0.684653196881458*sh_3_2*z + 0.684653196881457*sh_3_4*x + 0.176776695296637*sh_3_6*x
    sh_4_3 = -0.306186217847897*sh_3_1*z + 0.968245836551855*sh_3_2*y + 0.790569415042095*sh_3_3*x + 0.306186217847897*sh_3_5*x
    sh_4_4 = -0.612372435695795*sh_3_2*x + sh_3_3*y - 0.612372435695795*sh_3_4*z
    sh_4_5 = -0.306186217847897*sh_3_1*x + 0.790569415042096*sh_3_3*z + 0.968245836551854*sh_3_4*y - 0.306186217847897*sh_3_5*z
    sh_4_6 = -0.176776695296637*sh_3_0*x - 0.684653196881457*sh_3_2*x + 0.684653196881457*sh_3_4*z + 0.866025403784439*sh_3_5*y - 0.176776695296637*sh_3_6*z
    sh_4_7 = -0.810092587300982*sh_3_1*x + 0.810092587300982*sh_3_5*z + 0.661437827766148*sh_3_6*y
    sh_4_8 = -0.935414346693485*sh_3_0*x + 0.935414346693486*sh_3_6*z
    if lmax == 4:
        return torch.stack([
            sh_0_0,
            sh_1_0, sh_1_1, sh_1_2,
            sh_2_0, sh_2_1, sh_2_2, sh_2_3, sh_2_4,
            sh_3_0, sh_3_1, sh_3_2, sh_3_3, sh_3_4, sh_3_5, sh_3_6,
            sh_4_0, sh_4_1, sh_4_2, sh_4_3, sh_4_4, sh_4_5, sh_4_6, sh_4_7, sh_4_8
        ], dim=-1)

    # l = 5.
    sh_5_0 = 0.948683298050513*sh_4_0*z + 0.948683298050513*sh_4_8*x
    sh_5_1 = 0.6*sh_4_0*y + 0.848528137423857*sh_4_1*z + 0.848528137423858*sh_4_7*x
    sh_5_2 = -0.14142135623731*sh_4_0*z + 0.8*sh_4_1*y + 0.748331477354788*sh_4_2*z + 0.748331477354788*sh_4_6*x + 0.14142135623731*sh_4_8*x
    sh_5_3 = -0.244948974278318*sh_4_1*z + 0.916515138991168*sh_4_2*y + 0.648074069840786*sh_4_3*z + 0.648074069840787*sh_4_5*x + 0.244948974278318*sh_4_7*x
    sh_5_4 = -0.346410161513776*sh_4_2*z + 0.979795897113272*sh_4_3*y + 0.774596669241484*sh_4_4*x + 0.346410161513776*sh_4_6*x
    sh_5_5 = -0.632455532033676*sh_4_3*x + sh_4_4*y - 0.632455532033676*sh_4_5*z
    sh_5_6 = -0.346410161513776*sh_4_2*x + 0.774596669241483*sh_4_4*z + 0.979795897113273*sh_4_5*y - 0.346410161513776*sh_4_6*z
    sh_5_7 = -0.244948974278318*sh_4_1*x - 0.648074069840787*sh_4_3*x + 0.648074069840786*sh_4_5*z + 0.916515138991169*sh_4_6*y - 0.244948974278318*sh_4_7*z
    sh_5_8 = -0.141421356237309*sh_4_0*x - 0.748331477354788*sh_4_2*x + 0.748331477354788*sh_4_6*z + 0.8*sh_4_7*y - 0.141421356237309*sh_4_8*z
    sh_5_9 = -0.848528137423857*sh_4_1*x + 0.848528137423857*sh_4_7*z + 0.6*sh_4_8*y
    sh_5_10 = -0.948683298050513*sh_4_0*x + 0.948683298050513*sh_4_8*z
    if lmax == 5:
        return torch.stack([
            sh_0_0,
            sh_1_0, sh_1_1, sh_1_2,
            sh_2_0, sh_2_1, sh_2_2, sh_2_3, sh_2_4,
            sh_3_0, sh_3_1, sh_3_2, sh_3_3, sh_3_4, sh_3_5, sh_3_6,
            sh_4_0, sh_4_1, sh_4_2, sh_4_3, sh_4_4, sh_4_5, sh_4_6, sh_4_7, sh_4_8,
            sh_5_0, sh_5_1, sh_5_2, sh_5_3, sh_5_4, sh_5_5, sh_5_6, sh_5_7, sh_5_8, sh_5_9, sh_5_10
        ], dim=-1)

    # l = 6.
    sh_6_0 = 0.957427107756337*sh_5_0*z + 0.957427107756338*sh_5_10*x
    sh_6_1 = 0.552770798392565*sh_5_0*y + 0.874007373475125*sh_5_1*z + 0.874007373475125*sh_5_9*x
    sh_6_2 = -0.117851130197757*sh_5_0*z + 0.745355992499929*sh_5_1*y + 0.117851130197758*sh_5_10*x + 0.790569415042094*sh_5_2*z + 0.790569415042093*sh_5_8*x
    sh_6_3 = -0.204124145231931*sh_5_1*z + 0.866025403784437*sh_5_2*y + 0.707106781186546*sh_5_3*z + 0.707106781186547*sh_5_7*x + 0.204124145231931*sh_5_9*x
    sh_6_4 = -0.288675134594813*sh_5_2*z + 0.942809041582062*sh_5_3*y + 0.623609564462323*sh_5_4*z + 0.623609564462322*sh_5_6*x + 0.288675134594812*sh_5_8*x
    sh_6_5 = -0.372677996249965*sh_5_3*z + 0.986013297183268*sh_5_4*y + 0.763762615825972*sh_5_5*x + 0.372677996249964*sh_5_7*x
    sh_6_6 = -0.645497224367901*sh_5_4*x + sh_5_5*y - 0.645497224367902*sh_5_6*z
    sh_6_7 = -0.372677996249964*sh_5_3*x + 0.763762615825972*sh_5_5*z + 0.986013297183269*sh_5_6*y - 0.372677996249965*sh_5_7*z
    sh_6_8 = -0.288675134594813*sh_5_2*x - 0.623609564462323*sh_5_4*x + 0.623609564462323*sh_5_6*z + 0.942809041582062*sh_5_7*y - 0.288675134594812*sh_5_8*z
    sh_6_9 = -0.20412414523193*sh_5_1*x - 0.707106781186546*sh_5_3*x + 0.707106781186547*sh_5_7*z + 0.866025403784438*sh_5_8*y - 0.204124145231931*sh_5_9*z
    sh_6_10 = -0.117851130197757*sh_5_0*x - 0.117851130197757*sh_5_10*z - 0.790569415042094*sh_5_2*x + 0.790569415042093*sh_5_8*z + 0.745355992499929*sh_5_9*y
    sh_6_11 = -0.874007373475124*sh_5_1*x + 0.552770798392566*sh_5_10*y + 0.874007373475125*sh_5_9*z
    sh_6_12 = -0.957427107756337*sh_5_0*x + 0.957427107756336*sh_5_10*z
    if lmax == 6:
        return torch.stack([
            sh_0_0,
            sh_1_0, sh_1_1, sh_1_2,
            sh_2_0, sh_2_1, sh_2_2, sh_2_3, sh_2_4,
            sh_3_0, sh_3_1, sh_3_2, sh_3_3, sh_3_4, sh_3_5, sh_3_6,
            sh_4_0, sh_4_1, sh_4_2, sh_4_3, sh_4_4, sh_4_5, sh_4_6, sh_4_7, sh_4_8,
            sh_5_0, sh_5_1, sh_5_2, sh_5_3, sh_5_4, sh_5_5, sh_5_6, sh_5_7, sh_5_8, sh_5_9, sh_5_10,
            sh_6_0, sh_6_1, sh_6_2, sh_6_3, sh_6_4, sh_6_5, sh_6_6, sh_6_7, sh_6_8, sh_6_9, sh_6_10, sh_6_11, sh_6_12
        ], dim=-1)

    # l = 7.
    sh_7_0 = 0.963624111659433*sh_6_0*z + 0.963624111659432*sh_6_12*x
    sh_7_1 = 0.515078753637713*sh_6_0*y + 0.892142571199771*sh_6_1*z + 0.892142571199771*sh_6_11*x
    sh_7_2 = -0.101015254455221*sh_6_0*z + 0.699854212223765*sh_6_1*y + 0.82065180664829*sh_6_10*x + 0.101015254455222*sh_6_12*x + 0.82065180664829*sh_6_2*z
    sh_7_3 = -0.174963553055942*sh_6_1*z + 0.174963553055941*sh_6_11*x + 0.82065180664829*sh_6_2*y + 0.749149177264394*sh_6_3*z + 0.749149177264394*sh_6_9*x
    sh_7_4 = 0.247435829652697*sh_6_10*x - 0.247435829652697*sh_6_2*z + 0.903507902905251*sh_6_3*y + 0.677630927178938*sh_6_4*z + 0.677630927178938*sh_6_8*x
    sh_7_5 = -0.31943828249997*sh_6_3*z + 0.95831484749991*sh_6_4*y + 0.606091526731326*sh_6_5*z + 0.606091526731326*sh_6_7*x + 0.31943828249997*sh_6_9*x
    sh_7_6 = -0.391230398217976*sh_6_4*z + 0.989743318610787*sh_6_5*y + 0.755928946018454*sh_6_6*x + 0.391230398217975*sh_6_8*x
    sh_7_7 = -0.654653670707977*sh_6_5*x + sh_6_6*y - 0.654653670707978*sh_6_7*z
    sh_7_8 = -0.391230398217976*sh_6_4*x + 0.755928946018455*sh_6_6*z + 0.989743318610787*sh_6_7*y - 0.391230398217975*sh_6_8*z
    sh_7_9 = -0.31943828249997*sh_6_3*x - 0.606091526731327*sh_6_5*x + 0.606091526731326*sh_6_7*z + 0.95831484749991*sh_6_8*y - 0.31943828249997*sh_6_9*z
    sh_7_10 = -0.247435829652697*sh_6_10*z - 0.247435829652697*sh_6_2*x - 0.677630927178938*sh_6_4*x + 0.677630927178938*sh_6_8*z + 0.903507902905251*sh_6_9*y
    sh_7_11 = -0.174963553055942*sh_6_1*x + 0.820651806648289*sh_6_10*y - 0.174963553055941*sh_6_11*z - 0.749149177264394*sh_6_3*x + 0.749149177264394*sh_6_9*z
    sh_7_12 = -0.101015254455221*sh_6_0*x + 0.82065180664829*sh_6_10*z + 0.699854212223766*sh_6_11*y - 0.101015254455221*sh_6_12*z - 0.82065180664829*sh_6_2*x
    sh_7_13 = -0.892142571199772*sh_6_1*x + 0.892142571199772*sh_6_11*z + 0.515078753637713*sh_6_12*y
    sh_7_14 = -0.963624111659431*sh_6_0*x + 0.963624111659433*sh_6_12*z
    if lmax == 7:
        return torch.stack([
            sh_0_0,
            sh_1_0, sh_1_1, sh_1_2,
            sh_2_0, sh_2_1, sh_2_2, sh_2_3, sh_2_4,
            sh_3_0, sh_3_1, sh_3_2, sh_3_3, sh_3_4, sh_3_5, sh_3_6,
            sh_4_0, sh_4_1, sh_4_2, sh_4_3, sh_4_4, sh_4_5, sh_4_6, sh_4_7, sh_4_8,
            sh_5_0, sh_5_1, sh_5_2, sh_5_3, sh_5_4, sh_5_5, sh_5_6, sh_5_7, sh_5_8, sh_5_9, sh_5_10,
            sh_6_0, sh_6_1, sh_6_2, sh_6_3, sh_6_4, sh_6_5, sh_6_6, sh_6_7, sh_6_8, sh_6_9, sh_6_10, sh_6_11, sh_6_12,
            sh_7_0, sh_7_1, sh_7_2, sh_7_3, sh_7_4, sh_7_5, sh_7_6, sh_7_7, sh_7_8, sh_7_9, sh_7_10, sh_7_11, sh_7_12, sh_7_13, sh_7_14
        ], dim=-1)

    # l = 8.
    sh_8_0 = 0.968245836551854*sh_7_0*z + 0.968245836551853*sh_7_14*x
    sh_8_1 = 0.484122918275928*sh_7_0*y + 0.90571104663684*sh_7_1*z + 0.90571104663684*sh_7_13*x
    sh_8_2 = -0.0883883476483189*sh_7_0*z + 0.661437827766148*sh_7_1*y + 0.843171097702002*sh_7_12*x + 0.088388347648318*sh_7_14*x + 0.843171097702003*sh_7_2*z
    sh_8_3 = -0.153093108923948*sh_7_1*z + 0.7806247497998*sh_7_11*x + 0.153093108923949*sh_7_13*x + 0.7806247497998*sh_7_2*y + 0.780624749799799*sh_7_3*z
    sh_8_4 = 0.718070330817253*sh_7_10*x + 0.21650635094611*sh_7_12*x - 0.21650635094611*sh_7_2*z + 0.866025403784439*sh_7_3*y + 0.718070330817254*sh_7_4*z
    sh_8_5 = 0.279508497187474*sh_7_11*x - 0.279508497187474*sh_7_3*z + 0.927024810886958*sh_7_4*y + 0.655505530106345*sh_7_5*z + 0.655505530106344*sh_7_9*x
    sh_8_6 = 0.342326598440729*sh_7_10*x - 0.342326598440729*sh_7_4*z + 0.968245836551854*sh_7_5*y + 0.592927061281572*sh_7_6*z + 0.592927061281571*sh_7_8*x
    sh_8_7 = -0.405046293650492*sh_7_5*z + 0.992156741649221*sh_7_6*y + 0.75*sh_7_7*x + 0.405046293650492*sh_7_9*x
    sh_8_8 = -0.661437827766148*sh_7_6*x + sh_7_7*y - 0.661437827766148*sh_7_8*z
    sh_8_9 = -0.405046293650492*sh_7_5*x + 0.75*sh_7_7*z + 0.992156741649221*sh_7_8*y - 0.405046293650491*sh_7_9*z
    sh_8_10 = -0.342326598440728*sh_7_10*z - 0.342326598440729*sh_7_4*x - 0.592927061281571*sh_7_6*x + 0.592927061281571*sh_7_8*z + 0.968245836551855*sh_7_9*y
    sh_8_11 = 0.927024810886958*sh_7_10*y - 0.279508497187474*sh_7_11*z - 0.279508497187474*sh_7_3*x - 0.655505530106345*sh_7_5*x + 0.655505530106345*sh_7_9*z
    sh_8_12 = 0.718070330817253*sh_7_10*z + 0.866025403784439*sh_7_11*y - 0.216506350946109*sh_7_12*z - 0.216506350946109*sh_7_2*x - 0.718070330817254*sh_7_4*x
    sh_8_13 = -0.153093108923948*sh_7_1*x + 0.7806247497998*sh_7_11*z + 0.7806247497998*sh_7_12*y - 0.153093108923948*sh_7_13*z - 0.780624749799799*sh_7_3*x
    sh_8_14 = -0.0883883476483179*sh_7_0*x + 0.843171097702002*sh_7_12*z + 0.661437827766147*sh_7_13*y - 0.088388347648319*sh_7_14*z - 0.843171097702002*sh_7_2*x
    sh_8_15 = -0.90571104663684*sh_7_1*x + 0.90571104663684*sh_7_13*z + 0.484122918275927*sh_7_14*y
    sh_8_16 = -0.968245836551853*sh_7_0*x + 0.968245836551855*sh_7_14*z
    if lmax == 8:
        return torch.stack([
            sh_0_0,
            sh_1_0, sh_1_1, sh_1_2,
            sh_2_0, sh_2_1, sh_2_2, sh_2_3, sh_2_4,
            sh_3_0, sh_3_1, sh_3_2, sh_3_3, sh_3_4, sh_3_5, sh_3_6,
            sh_4_0, sh_4_1, sh_4_2, sh_4_3, sh_4_4, sh_4_5, sh_4_6, sh_4_7, sh_4_8,
            sh_5_0, sh_5_1, sh_5_2, sh_5_3, sh_5_4, sh_5_5, sh_5_6, sh_5_7, sh_5_8, sh_5_9, sh_5_10,
            sh_6_0, sh_6_1, sh_6_2, sh_6_3, sh_6_4, sh_6_5, sh_6_6, sh_6_7, sh_6_8, sh_6_9, sh_6_10, sh_6_11, sh_6_12,
            sh_7_0, sh_7_1, sh_7_2, sh_7_3, sh_7_4, sh_7_5, sh_7_6, sh_7_7, sh_7_8, sh_7_9, sh_7_10, sh_7_11, sh_7_12, sh_7_13, sh_7_14,
            sh_8_0, sh_8_1, sh_8_2, sh_8_3, sh_8_4, sh_8_5, sh_8_6, sh_8_7, sh_8_8, sh_8_9, sh_8_10, sh_8_11, sh_8_12, sh_8_13, sh_8_14, sh_8_15, sh_8_16
        ], dim=-1)

    # l = 9.
    sh_9_0 = 0.97182531580755*sh_8_0*z + 0.971825315807551*sh_8_16*x
    sh_9_1 = 0.458122847290851*sh_8_0*y + 0.916245694581702*sh_8_1*z + 0.916245694581702*sh_8_15*x
    sh_9_2 = -0.078567420131839*sh_8_0*z + 0.62853936105471*sh_8_1*y + 0.86066296582387*sh_8_14*x + 0.0785674201318385*sh_8_16*x + 0.860662965823871*sh_8_2*z
    sh_9_3 = -0.136082763487955*sh_8_1*z + 0.805076485899413*sh_8_13*x + 0.136082763487954*sh_8_15*x + 0.74535599249993*sh_8_2*y + 0.805076485899413*sh_8_3*z
    sh_9_4 = 0.749485420179558*sh_8_12*x + 0.192450089729875*sh_8_14*x - 0.192450089729876*sh_8_2*z + 0.831479419283099*sh_8_3*y + 0.749485420179558*sh_8_4*z
    sh_9_5 = 0.693888666488711*sh_8_11*x + 0.248451997499977*sh_8_13*x - 0.248451997499976*sh_8_3*z + 0.895806416477617*sh_8_4*y + 0.69388866648871*sh_8_5*z
    sh_9_6 = 0.638284738504225*sh_8_10*x + 0.304290309725092*sh_8_12*x - 0.304290309725092*sh_8_4*z + 0.942809041582063*sh_8_5*y + 0.638284738504225*sh_8_6*z
    sh_9_7 = 0.360041149911548*sh_8_11*x - 0.360041149911548*sh_8_5*z + 0.974996043043569*sh_8_6*y + 0.582671582316751*sh_8_7*z + 0.582671582316751*sh_8_9*x
    sh_9_8 = 0.415739709641549*sh_8_10*x - 0.415739709641549*sh_8_6*z + 0.993807989999906*sh_8_7*y + 0.74535599249993*sh_8_8*x
    sh_9_9 = -0.66666666666666666667*sh_8_7*x + sh_8_8*y - 0.66666666666666666667*sh_8_9*z
    sh_9_10 = -0.415739709641549*sh_8_10*z - 0.415739709641549*sh_8_6*x + 0.74535599249993*sh_8_8*z + 0.993807989999906*sh_8_9*y
    sh_9_11 = 0.974996043043568*sh_8_10*y - 0.360041149911547*sh_8_11*z - 0.360041149911548*sh_8_5*x - 0.582671582316751*sh_8_7*x + 0.582671582316751*sh_8_9*z
    sh_9_12 = 0.638284738504225*sh_8_10*z + 0.942809041582063*sh_8_11*y - 0.304290309725092*sh_8_12*z - 0.304290309725092*sh_8_4*x - 0.638284738504225*sh_8_6*x
    sh_9_13 = 0.693888666488711*sh_8_11*z + 0.895806416477617*sh_8_12*y - 0.248451997499977*sh_8_13*z - 0.248451997499977*sh_8_3*x - 0.693888666488711*sh_8_5*x
    sh_9_14 = 0.749485420179558*sh_8_12*z + 0.831479419283098*sh_8_13*y - 0.192450089729875*sh_8_14*z - 0.192450089729875*sh_8_2*x - 0.749485420179558*sh_8_4*x
    sh_9_15 = -0.136082763487954*sh_8_1*x + 0.805076485899413*sh_8_13*z + 0.745355992499929*sh_8_14*y - 0.136082763487955*sh_8_15*z - 0.805076485899413*sh_8_3*x
    sh_9_16 = -0.0785674201318389*sh_8_0*x + 0.86066296582387*sh_8_14*z + 0.628539361054709*sh_8_15*y - 0.0785674201318387*sh_8_16*z - 0.860662965823871*sh_8_2*x
    sh_9_17 = -0.9162456945817*sh_8_1*x + 0.916245694581702*sh_8_15*z + 0.458122847290851*sh_8_16*y
    sh_9_18 = -0.97182531580755*sh_8_0*x + 0.97182531580755*sh_8_16*z
    if lmax == 9:
        return torch.stack([
            sh_0_0,
            sh_1_0, sh_1_1, sh_1_2,
            sh_2_0, sh_2_1, sh_2_2, sh_2_3, sh_2_4,
            sh_3_0, sh_3_1, sh_3_2, sh_3_3, sh_3_4, sh_3_5, sh_3_6,
            sh_4_0, sh_4_1, sh_4_2, sh_4_3, sh_4_4, sh_4_5, sh_4_6, sh_4_7, sh_4_8,
            sh_5_0, sh_5_1, sh_5_2, sh_5_3, sh_5_4, sh_5_5, sh_5_6, sh_5_7, sh_5_8, sh_5_9, sh_5_10,
            sh_6_0, sh_6_1, sh_6_2, sh_6_3, sh_6_4, sh_6_5, sh_6_6, sh_6_7, sh_6_8, sh_6_9, sh_6_10, sh_6_11, sh_6_12,
            sh_7_0, sh_7_1, sh_7_2, sh_7_3, sh_7_4, sh_7_5, sh_7_6, sh_7_7, sh_7_8, sh_7_9, sh_7_10, sh_7_11, sh_7_12, sh_7_13, sh_7_14,
            sh_8_0, sh_8_1, sh_8_2, sh_8_3, sh_8_4, sh_8_5, sh_8_6, sh_8_7, sh_8_8, sh_8_9, sh_8_10, sh_8_11, sh_8_12, sh_8_13, sh_8_14, sh_8_15, sh_8_16,
            sh_9_0, sh_9_1, sh_9_2, sh_9_3, sh_9_4, sh_9_5, sh_9_6, sh_9_7, sh_9_8, sh_9_9, sh_9_10, sh_9_11, sh_9_12, sh_9_13, sh_9_14, sh_9_15, sh_9_16, sh_9_17, sh_9_18
        ], dim=-1)

    # l = 10.
    sh_10_0 = 0.974679434480897*sh_9_0*z + 0.974679434480897*sh_9_18*x
    sh_10_1 = 0.435889894354067*sh_9_0*y + 0.924662100445347*sh_9_1*z + 0.924662100445347*sh_9_17*x
    sh_10_2 = -0.0707106781186546*sh_9_0*z + 0.6*sh_9_1*y + 0.874642784226796*sh_9_16*x + 0.070710678118655*sh_9_18*x + 0.874642784226795*sh_9_2*z
    sh_10_3 = -0.122474487139159*sh_9_1*z + 0.824621125123533*sh_9_15*x + 0.122474487139159*sh_9_17*x + 0.714142842854285*sh_9_2*y + 0.824621125123533*sh_9_3*z
    sh_10_4 = 0.774596669241484*sh_9_14*x + 0.173205080756887*sh_9_16*x - 0.173205080756888*sh_9_2*z + 0.8*sh_9_3*y + 0.774596669241483*sh_9_4*z
    sh_10_5 = 0.724568837309472*sh_9_13*x + 0.223606797749979*sh_9_15*x - 0.223606797749979*sh_9_3*z + 0.866025403784438*sh_9_4*y + 0.724568837309472*sh_9_5*z
    sh_10_6 = 0.674536878161602*sh_9_12*x + 0.273861278752583*sh_9_14*x - 0.273861278752583*sh_9_4*z + 0.916515138991168*sh_9_5*y + 0.674536878161602*sh_9_6*z
    sh_10_7 = 0.62449979983984*sh_9_11*x + 0.324037034920393*sh_9_13*x - 0.324037034920393*sh_9_5*z + 0.953939201416946*sh_9_6*y + 0.62449979983984*sh_9_7*z
    sh_10_8 = 0.574456264653803*sh_9_10*x + 0.374165738677394*sh_9_12*x - 0.374165738677394*sh_9_6*z + 0.979795897113272*sh_9_7*y + 0.574456264653803*sh_9_8*z
    sh_10_9 = 0.424264068711928*sh_9_11*x - 0.424264068711929*sh_9_7*z + 0.99498743710662*sh_9_8*y + 0.741619848709567*sh_9_9*x
    sh_10_10 = -0.670820393249937*sh_9_10*z - 0.670820393249937*sh_9_8*x + sh_9_9*y
    sh_10_11 = 0.99498743710662*sh_9_10*y - 0.424264068711929*sh_9_11*z - 0.424264068711929*sh_9_7*x + 0.741619848709567*sh_9_9*z
    sh_10_12 = 0.574456264653803*sh_9_10*z + 0.979795897113272*sh_9_11*y - 0.374165738677395*sh_9_12*z - 0.374165738677394*sh_9_6*x - 0.574456264653803*sh_9_8*x
    sh_10_13 = 0.62449979983984*sh_9_11*z + 0.953939201416946*sh_9_12*y - 0.324037034920393*sh_9_13*z - 0.324037034920393*sh_9_5*x - 0.62449979983984*sh_9_7*x
    sh_10_14 = 0.674536878161602*sh_9_12*z + 0.916515138991168*sh_9_13*y - 0.273861278752583*sh_9_14*z - 0.273861278752583*sh_9_4*x - 0.674536878161603*sh_9_6*x
    sh_10_15 = 0.724568837309472*sh_9_13*z + 0.866025403784439*sh_9_14*y - 0.223606797749979*sh_9_15*z - 0.223606797749979*sh_9_3*x - 0.724568837309472*sh_9_5*x
    sh_10_16 = 0.774596669241484*sh_9_14*z + 0.8*sh_9_15*y - 0.173205080756888*sh_9_16*z - 0.173205080756887*sh_9_2*x - 0.774596669241484*sh_9_4*x
    sh_10_17 = -0.12247448713916*sh_9_1*x + 0.824621125123532*sh_9_15*z + 0.714142842854285*sh_9_16*y - 0.122474487139158*sh_9_17*z - 0.824621125123533*sh_9_3*x
    sh_10_18 = -0.0707106781186548*sh_9_0*x + 0.874642784226796*sh_9_16*z + 0.6*sh_9_17*y - 0.0707106781186546*sh_9_18*z - 0.874642784226796*sh_9_2*x
    sh_10_19 = -0.924662100445348*sh_9_1*x + 0.924662100445347*sh_9_17*z + 0.435889894354068*sh_9_18*y
    sh_10_20 = -0.974679434480898*sh_9_0*x + 0.974679434480896*sh_9_18*z
    if lmax == 10:
        return torch.stack([
            sh_0_0,
            sh_1_0, sh_1_1, sh_1_2,
            sh_2_0, sh_2_1, sh_2_2, sh_2_3, sh_2_4,
            sh_3_0, sh_3_1, sh_3_2, sh_3_3, sh_3_4, sh_3_5, sh_3_6,
            sh_4_0, sh_4_1, sh_4_2, sh_4_3, sh_4_4, sh_4_5, sh_4_6, sh_4_7, sh_4_8,
            sh_5_0, sh_5_1, sh_5_2, sh_5_3, sh_5_4, sh_5_5, sh_5_6, sh_5_7, sh_5_8, sh_5_9, sh_5_10,
            sh_6_0, sh_6_1, sh_6_2, sh_6_3, sh_6_4, sh_6_5, sh_6_6, sh_6_7, sh_6_8, sh_6_9, sh_6_10, sh_6_11, sh_6_12,
            sh_7_0, sh_7_1, sh_7_2, sh_7_3, sh_7_4, sh_7_5, sh_7_6, sh_7_7, sh_7_8, sh_7_9, sh_7_10, sh_7_11, sh_7_12, sh_7_13, sh_7_14,
            sh_8_0, sh_8_1, sh_8_2, sh_8_3, sh_8_4, sh_8_5, sh_8_6, sh_8_7, sh_8_8, sh_8_9, sh_8_10, sh_8_11, sh_8_12, sh_8_13, sh_8_14, sh_8_15, sh_8_16,
            sh_9_0, sh_9_1, sh_9_2, sh_9_3, sh_9_4, sh_9_5, sh_9_6, sh_9_7, sh_9_8, sh_9_9, sh_9_10, sh_9_11, sh_9_12, sh_9_13, sh_9_14, sh_9_15, sh_9_16, sh_9_17, sh_9_18,
            sh_10_0, sh_10_1, sh_10_2, sh_10_3, sh_10_4, sh_10_5, sh_10_6, sh_10_7, sh_10_8, sh_10_9, sh_10_10, sh_10_11, sh_10_12, sh_10_13, sh_10_14, sh_10_15, sh_10_16, sh_10_17, sh_10_18, sh_10_19, sh_10_20
        ], dim=-1)

    # l = 11 (highest supported degree; falls through to the final stack).
    sh_11_0 = 0.977008420918394*sh_10_0*z + 0.977008420918394*sh_10_20*x
    sh_11_1 = 0.416597790450531*sh_10_0*y + 0.9315409787236*sh_10_1*z + 0.931540978723599*sh_10_19*x
    sh_11_2 = -0.0642824346533223*sh_10_0*z + 0.574959574576069*sh_10_1*y + 0.88607221316445*sh_10_18*x + 0.886072213164452*sh_10_2*z + 0.0642824346533226*sh_10_20*x
    sh_11_3 = -0.111340442853781*sh_10_1*z + 0.84060190949577*sh_10_17*x + 0.111340442853781*sh_10_19*x + 0.686348585024614*sh_10_2*y + 0.840601909495769*sh_10_3*z
    sh_11_4 = 0.795129803842541*sh_10_16*x + 0.157459164324444*sh_10_18*x - 0.157459164324443*sh_10_2*z + 0.771389215839871*sh_10_3*y + 0.795129803842541*sh_10_4*z
    sh_11_5 = 0.74965556829412*sh_10_15*x + 0.203278907045435*sh_10_17*x - 0.203278907045436*sh_10_3*z + 0.838140405208444*sh_10_4*y + 0.74965556829412*sh_10_5*z
    sh_11_6 = 0.70417879021953*sh_10_14*x + 0.248964798865985*sh_10_16*x - 0.248964798865985*sh_10_4*z + 0.890723542830247*sh_10_5*y + 0.704178790219531*sh_10_6*z
    sh_11_7 = 0.658698943008611*sh_10_13*x + 0.294579122654903*sh_10_15*x - 0.294579122654903*sh_10_5*z + 0.9315409787236*sh_10_6*y + 0.658698943008611*sh_10_7*z
    sh_11_8 = 0.613215343783275*sh_10_12*x + 0.340150671524904*sh_10_14*x - 0.340150671524904*sh_10_6*z + 0.962091385841669*sh_10_7*y + 0.613215343783274*sh_10_8*z
    sh_11_9 = 0.567727090763491*sh_10_11*x + 0.385694607919935*sh_10_13*x - 0.385694607919935*sh_10_7*z + 0.983332166035633*sh_10_8*y + 0.56772709076349*sh_10_9*z
    sh_11_10 = 0.738548945875997*sh_10_10*x + 0.431219680932052*sh_10_12*x - 0.431219680932052*sh_10_8*z + 0.995859195463938*sh_10_9*y
    sh_11_11 = sh_10_10*y - 0.674199862463242*sh_10_11*z - 0.674199862463243*sh_10_9*x
    sh_11_12 = 0.738548945875996*sh_10_10*z + 0.995859195463939*sh_10_11*y - 0.431219680932052*sh_10_12*z - 0.431219680932053*sh_10_8*x
    sh_11_13 = 0.567727090763491*sh_10_11*z + 0.983332166035634*sh_10_12*y - 0.385694607919935*sh_10_13*z - 0.385694607919935*sh_10_7*x - 0.567727090763491*sh_10_9*x
    sh_11_14 = 0.613215343783275*sh_10_12*z + 0.96209138584167*sh_10_13*y - 0.340150671524904*sh_10_14*z - 0.340150671524904*sh_10_6*x - 0.613215343783274*sh_10_8*x
    sh_11_15 = 0.658698943008611*sh_10_13*z + 0.9315409787236*sh_10_14*y - 0.294579122654903*sh_10_15*z - 0.294579122654903*sh_10_5*x - 0.65869894300861*sh_10_7*x
    sh_11_16 = 0.70417879021953*sh_10_14*z + 0.890723542830246*sh_10_15*y - 0.248964798865985*sh_10_16*z - 0.248964798865985*sh_10_4*x - 0.70417879021953*sh_10_6*x
    sh_11_17 = 0.749655568294121*sh_10_15*z + 0.838140405208444*sh_10_16*y - 0.203278907045436*sh_10_17*z - 0.203278907045435*sh_10_3*x - 0.749655568294119*sh_10_5*x
    sh_11_18 = 0.79512980384254*sh_10_16*z + 0.77138921583987*sh_10_17*y - 0.157459164324443*sh_10_18*z - 0.157459164324444*sh_10_2*x - 0.795129803842541*sh_10_4*x
    sh_11_19 = -0.111340442853782*sh_10_1*x + 0.84060190949577*sh_10_17*z + 0.686348585024614*sh_10_18*y - 0.111340442853781*sh_10_19*z - 0.840601909495769*sh_10_3*x
    sh_11_20 = -0.0642824346533226*sh_10_0*x + 0.886072213164451*sh_10_18*z + 0.57495957457607*sh_10_19*y - 0.886072213164451*sh_10_2*x - 0.0642824346533228*sh_10_20*z
    sh_11_21 = -0.9315409787236*sh_10_1*x + 0.931540978723599*sh_10_19*z + 0.416597790450531*sh_10_20*y
    sh_11_22 = -0.977008420918393*sh_10_0*x + 0.977008420918393*sh_10_20*z
    return torch.stack([
        sh_0_0,
        sh_1_0, sh_1_1, sh_1_2,
        sh_2_0, sh_2_1, sh_2_2, sh_2_3, sh_2_4,
        sh_3_0, sh_3_1, sh_3_2, sh_3_3, sh_3_4, sh_3_5, sh_3_6,
        sh_4_0, sh_4_1, sh_4_2, sh_4_3, sh_4_4, sh_4_5, sh_4_6, sh_4_7, sh_4_8,
        sh_5_0, sh_5_1, sh_5_2, sh_5_3, sh_5_4, sh_5_5, sh_5_6, sh_5_7, sh_5_8, sh_5_9, sh_5_10,
        sh_6_0, sh_6_1, sh_6_2, sh_6_3, sh_6_4, sh_6_5, sh_6_6, sh_6_7, sh_6_8, sh_6_9, sh_6_10, sh_6_11, sh_6_12,
        sh_7_0, sh_7_1, sh_7_2, sh_7_3, sh_7_4, sh_7_5, sh_7_6, sh_7_7, sh_7_8, sh_7_9, sh_7_10, sh_7_11, sh_7_12, sh_7_13, sh_7_14,
        sh_8_0, sh_8_1, sh_8_2, sh_8_3, sh_8_4, sh_8_5, sh_8_6, sh_8_7, sh_8_8, sh_8_9, sh_8_10, sh_8_11, sh_8_12, sh_8_13, sh_8_14, sh_8_15, sh_8_16,
        sh_9_0, sh_9_1, sh_9_2, sh_9_3, sh_9_4, sh_9_5, sh_9_6, sh_9_7, sh_9_8, sh_9_9, sh_9_10, sh_9_11, sh_9_12, sh_9_13, sh_9_14, sh_9_15, sh_9_16, sh_9_17, sh_9_18,
        sh_10_0, sh_10_1, sh_10_2, sh_10_3, sh_10_4, sh_10_5, sh_10_6, sh_10_7, sh_10_8, sh_10_9, sh_10_10, sh_10_11, sh_10_12, sh_10_13, sh_10_14, sh_10_15, sh_10_16, sh_10_17, sh_10_18, sh_10_19, sh_10_20,
        sh_11_0, sh_11_1, sh_11_2, sh_11_3, sh_11_4, sh_11_5, sh_11_6, sh_11_7, sh_11_8, sh_11_9, sh_11_10, sh_11_11, sh_11_12, sh_11_13, sh_11_14, sh_11_15, sh_11_16, sh_11_17, sh_11_18, sh_11_19, sh_11_20, sh_11_21, sh_11_22
    ], dim=-1)
321
+
322
+
323
def collate_fn(graph_list):
    """Collate a list of graphs into a batch via an LCMP-enabled Collater."""
    collater = Collater(if_lcmp=True)
    return collater(graph_list)
325
+
326
+
327
class Collater:
    """Callable that merges a list of PyG ``Data`` graphs into one ``Batch``.

    When ``if_lcmp`` is True, the per-graph local-coordinate (LCMP) subgraph
    tensors are also concatenated, with their index tensors shifted by each
    graph's node/edge offsets so they remain valid inside the merged batch.
    """

    def __init__(self, if_lcmp):
        # Whether to also collate the LCMP subgraph tensors.
        self.if_lcmp = if_lcmp
        # PyG 2.x stores per-graph offsets in `batch._slice_dict`;
        # PyG 1.x used `batch.__slices__`.
        self.flag_pyg2 = (torch_geometric.__version__[0] == '2')

    def __call__(self, graph_list):
        if self.if_lcmp:
            # Newer graph files carry the subgraph tensors in a `subgraph_dict`
            # attribute; legacy files carry a `subgraph` tuple on the batch.
            flag_dict = hasattr(graph_list[0], 'subgraph_dict')
            if self.flag_pyg2:
                assert flag_dict, 'Please generate the graph file with the current version of PyG'
            batch = Batch.from_data_list(graph_list)

            subgraph_atom_idx_batch = []
            subgraph_edge_idx_batch = []
            subgraph_edge_ang_batch = []
            subgraph_index_batch = []
            if flag_dict:
                for index_batch in range(len(graph_list)):
                    # NOTE(review): relies on subgraph_dict preserving insertion
                    # order (atom_idx, edge_idx, edge_ang, index) — confirm the
                    # graph-file writer stores keys in exactly this order.
                    (subgraph_atom_idx, subgraph_edge_idx, subgraph_edge_ang,
                     subgraph_index) = graph_list[index_batch].subgraph_dict.values()
                    if self.flag_pyg2:
                        # Shift node indices by the node offset, edge indices by
                        # the edge offset; `subgraph_index` addresses directed
                        # edge slots, hence the factor 2.
                        subgraph_atom_idx_batch.append(subgraph_atom_idx + batch._slice_dict['x'][index_batch])
                        subgraph_edge_idx_batch.append(subgraph_edge_idx + batch._slice_dict['edge_attr'][index_batch])
                        subgraph_index_batch.append(subgraph_index + batch._slice_dict['edge_attr'][index_batch] * 2)
                    else:
                        subgraph_atom_idx_batch.append(subgraph_atom_idx + batch.__slices__['x'][index_batch])
                        subgraph_edge_idx_batch.append(subgraph_edge_idx + batch.__slices__['edge_attr'][index_batch])
                        subgraph_index_batch.append(subgraph_index + batch.__slices__['edge_attr'][index_batch] * 2)
                    # Angular features need no offset — they are values, not indices.
                    subgraph_edge_ang_batch.append(subgraph_edge_ang)
            else:
                # Legacy path (PyG 1.x graph files): subgraph tuples live on the batch.
                for index_batch, (subgraph_atom_idx, subgraph_edge_idx,
                                  subgraph_edge_ang, subgraph_index) in enumerate(batch.subgraph):
                    subgraph_atom_idx_batch.append(subgraph_atom_idx + batch.__slices__['x'][index_batch])
                    subgraph_edge_idx_batch.append(subgraph_edge_idx + batch.__slices__['edge_attr'][index_batch])
                    subgraph_edge_ang_batch.append(subgraph_edge_ang)
                    subgraph_index_batch.append(subgraph_index + batch.__slices__['edge_attr'][index_batch] * 2)

            # Concatenate the shifted per-graph tensors into batch-level tensors.
            subgraph_atom_idx_batch = torch.cat(subgraph_atom_idx_batch, dim=0)
            subgraph_edge_idx_batch = torch.cat(subgraph_edge_idx_batch, dim=0)
            subgraph_edge_ang_batch = torch.cat(subgraph_edge_ang_batch, dim=0)
            subgraph_index_batch = torch.cat(subgraph_index_batch, dim=0)

            subgraph = (subgraph_atom_idx_batch, subgraph_edge_idx_batch, subgraph_edge_ang_batch, subgraph_index_batch)

            return batch, subgraph
        else:
            # No LCMP tensors requested: plain PyG batching.
            return Batch.from_data_list(graph_list)
374
+
375
+
376
def load_orbital_types(path, return_orbital_types=False):
    """Parse an ``orbital_types.dat`` file.

    Each non-empty line lists the angular-momentum quantum numbers ``l`` of
    one atom's orbitals, whitespace-separated.

    Args:
        path: path to the orbital-types file.
        return_orbital_types: if True, also return the per-atom ``l`` lists.

    Returns:
        ``atom_num_orbital``: list with the number of orbital components per
        atom (each ``l`` contributes ``2*l + 1``); when
        ``return_orbital_types`` is True, a ``(atom_num_orbital,
        orbital_types)`` tuple instead.

    Raises:
        ValueError: if a line contains a non-integer token.
    """
    orbital_types = []
    with open(path) as f:
        for line in f:
            # Skip blank/whitespace-only lines (e.g. a trailing newline).
            # The previous readline loop parsed them into an empty list,
            # creating a spurious atom with zero orbitals.
            if line.strip():
                orbital_types.append([int(token) for token in line.split()])
    # Each orbital with angular momentum l has 2l+1 magnetic components.
    atom_num_orbital = [sum(2 * l + 1 for l in atom_orbital_types)
                        for atom_orbital_types in orbital_types]
    if return_orbital_types:
        return atom_num_orbital, orbital_types
    return atom_num_orbital
388
+
389
+
390
+ """
391
+ The function get_graph below is extended from "https://github.com/materialsproject/pymatgen", which has the MIT License below
392
+
393
+ ---------------------------------------------------------------------------
394
+ The MIT License (MIT)
395
+ Copyright (c) 2011-2012 MIT & The Regents of the University of California, through Lawrence Berkeley National Laboratory
396
+
397
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
398
+ this software and associated documentation files (the "Software"), to deal in
399
+ the Software without restriction, including without limitation the rights to
400
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
401
+ the Software, and to permit persons to whom the Software is furnished to do so,
402
+ subject to the following conditions:
403
+
404
+ The above copyright notice and this permission notice shall be included in all
405
+ copies or substantial portions of the Software.
406
+
407
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
408
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
409
+ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
410
+ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
411
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
412
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
413
+ """
414
+ def get_graph(cart_coords, frac_coords, numbers, stru_id, r, max_num_nbr, numerical_tol, lattice,
415
+ default_dtype_torch, tb_folder, interface, num_l, create_from_DFT, if_lcmp_graph,
416
+ separate_onsite, target='hamiltonian', huge_structure=False, only_get_R_list=False, if_new_sp=False,
417
+ if_require_grad=False, fid_rc=None, **kwargs):
418
+ assert target in ['hamiltonian', 'phiVdphi', 'density_matrix', 'O_ij', 'E_ij', 'E_i']
419
+ if target == 'density_matrix' or target == 'O_ij':
420
+ assert interface == 'h5' or interface == 'h5_rc_only'
421
+ if target == 'E_ij':
422
+ assert interface == 'h5'
423
+ assert create_from_DFT is True
424
+ assert separate_onsite is True
425
+ if target == 'E_i':
426
+ assert interface == 'h5'
427
+ assert if_lcmp_graph is False
428
+ assert separate_onsite is True
429
+ if create_from_DFT:
430
+ assert tb_folder is not None
431
+ assert max_num_nbr == 0
432
+ if interface == 'h5_rc_only' and target == 'E_ij':
433
+ raise NotImplementedError
434
+ elif interface == 'h5' or (interface == 'h5_rc_only' and target != 'E_ij'):
435
+ key_atom_list = [[] for _ in range(len(numbers))]
436
+ edge_idx, edge_fea, edge_idx_first = [], [], []
437
+ if if_lcmp_graph:
438
+ atom_idx_connect, edge_idx_connect = [], []
439
+ edge_idx_connect_cursor = 0
440
+ if target == 'E_ij':
441
+ fid = h5py.File(os.path.join(tb_folder, 'E_delta_ee_ij.h5'), 'r')
442
+ else:
443
+ if if_require_grad:
444
+ fid = fid_rc
445
+ else:
446
+ fid = h5py.File(os.path.join(tb_folder, 'rc.h5'), 'r')
447
+ for k in fid.keys():
448
+ key = json.loads(k)
449
+ key_tensor = torch.tensor([key[0], key[1], key[2], key[3] - 1, key[4] - 1]) # (R, i, j) i and j is 0-based index
450
+ if separate_onsite:
451
+ if key[0] == 0 and key[1] == 0 and key[2] == 0 and key[3] == key[4]:
452
+ continue
453
+ key_atom_list[key[3] - 1].append(key_tensor)
454
+ if target != 'E_ij' and not if_require_grad:
455
+ fid.close()
456
+
457
+ for index_first, (cart_coord, keys_tensor) in enumerate(zip(cart_coords, key_atom_list)):
458
+ keys_tensor = torch.stack(keys_tensor)
459
+ cart_coords_j = cart_coords[keys_tensor[:, 4]] + keys_tensor[:, :3].type(default_dtype_torch).to(cart_coords.device) @ lattice.to(cart_coords.device)
460
+ dist = torch.norm(cart_coords_j - cart_coord[None, :], dim=1)
461
+ len_nn = keys_tensor.shape[0]
462
+ edge_idx_first.extend([index_first] * len_nn)
463
+ edge_idx.extend(keys_tensor[:, 4].tolist())
464
+
465
+ edge_fea_single = torch.cat([dist.view(-1, 1), cart_coord.view(1, 3).expand(len_nn, 3)], dim=-1)
466
+ edge_fea_single = torch.cat([edge_fea_single, cart_coords_j, cart_coords[keys_tensor[:, 4]]], dim=-1)
467
+ edge_fea.append(edge_fea_single)
468
+
469
+ if if_lcmp_graph:
470
+ atom_idx_connect.append(keys_tensor[:, 4])
471
+ edge_idx_connect.append(range(edge_idx_connect_cursor, edge_idx_connect_cursor + len_nn))
472
+ edge_idx_connect_cursor += len_nn
473
+
474
+ edge_fea = torch.cat(edge_fea).type(default_dtype_torch)
475
+ edge_idx = torch.stack([torch.LongTensor(edge_idx_first), torch.LongTensor(edge_idx)])
476
+ else:
477
+ raise NotImplemented
478
+ else:
479
+ cart_coords_np = cart_coords.detach().numpy()
480
+ frac_coords_np = frac_coords.detach().numpy()
481
+ lattice_np = lattice.detach().numpy()
482
+ num_atom = cart_coords.shape[0]
483
+
484
+ center_coords_min = np.min(cart_coords_np, axis=0)
485
+ center_coords_max = np.max(cart_coords_np, axis=0)
486
+ global_min = center_coords_min - r - numerical_tol
487
+ global_max = center_coords_max + r + numerical_tol
488
+ global_min_torch = torch.tensor(global_min)
489
+ global_max_torch = torch.tensor(global_max)
490
+
491
+ reciprocal_lattice = np.linalg.inv(lattice_np).T * 2 * np.pi
492
+ recp_len = np.sqrt(np.sum(reciprocal_lattice ** 2, axis=1))
493
+ maxr = np.ceil((r + 0.15) * recp_len / (2 * np.pi))
494
+ nmin = np.floor(np.min(frac_coords_np, axis=0)) - maxr
495
+ nmax = np.ceil(np.max(frac_coords_np, axis=0)) + maxr
496
+ all_ranges = [np.arange(x, y, dtype='int64') for x, y in zip(nmin, nmax)]
497
+ images = torch.tensor(list(itertools.product(*all_ranges))).type_as(lattice)
498
+
499
+ if only_get_R_list:
500
+ return images
501
+
502
+ coords = (images @ lattice)[:, None, :] + cart_coords[None, :, :]
503
+ indices = torch.arange(num_atom).unsqueeze(0).expand(images.shape[0], num_atom)
504
+ valid_index_bool = coords.gt(global_min_torch) * coords.lt(global_max_torch)
505
+ valid_index_bool = valid_index_bool.all(dim=-1)
506
+ valid_coords = coords[valid_index_bool]
507
+ valid_indices = indices[valid_index_bool]
508
+
509
+
510
+ valid_coords_np = valid_coords.detach().numpy()
511
+ all_cube_index = _compute_cube_index(valid_coords_np, global_min, r)
512
+ nx, ny, nz = _compute_cube_index(global_max, global_min, r) + 1
513
+ all_cube_index = _three_to_one(all_cube_index, ny, nz)
514
+ site_cube_index = _three_to_one(_compute_cube_index(cart_coords_np, global_min, r), ny, nz)
515
+ cube_to_coords_index = collections.defaultdict(list) # type: Dict[int, List]
516
+
517
+ for index, cart_coord in enumerate(all_cube_index.ravel()):
518
+ cube_to_coords_index[cart_coord].append(index)
519
+
520
+ site_neighbors = find_neighbors(site_cube_index, nx, ny, nz)
521
+
522
+ edge_idx, edge_fea, edge_idx_first = [], [], []
523
+ if if_lcmp_graph:
524
+ atom_idx_connect, edge_idx_connect = [], []
525
+ edge_idx_connect_cursor = 0
526
+ for index_first, (cart_coord, j) in enumerate(zip(cart_coords, site_neighbors)):
527
+ l1 = np.array(_three_to_one(j, ny, nz), dtype=int).ravel()
528
+ ks = [k for k in l1 if k in cube_to_coords_index]
529
+ nn_coords_index = np.concatenate([cube_to_coords_index[k] for k in ks], axis=0)
530
+ nn_coords = valid_coords[nn_coords_index]
531
+ nn_indices = valid_indices[nn_coords_index]
532
+ dist = torch.norm(nn_coords - cart_coord[None, :], dim=1)
533
+
534
+ if separate_onsite is False:
535
+ nn_coords = nn_coords.squeeze()
536
+ nn_indices = nn_indices.squeeze()
537
+ dist = dist.squeeze()
538
+ else:
539
+ nonzero_index = dist.nonzero(as_tuple=False)
540
+ nn_coords = nn_coords[nonzero_index]
541
+ nn_coords = nn_coords.squeeze(1)
542
+ nn_indices = nn_indices[nonzero_index].view(-1)
543
+ dist = dist[nonzero_index].view(-1)
544
+
545
+ if max_num_nbr > 0:
546
+ if len(dist) >= max_num_nbr:
547
+ dist_top, index_top = dist.topk(max_num_nbr, largest=False, sorted=True)
548
+ edge_idx.extend(nn_indices[index_top])
549
+ if if_lcmp_graph:
550
+ atom_idx_connect.append(nn_indices[index_top])
551
+ edge_idx_first.extend([index_first] * len(index_top))
552
+ edge_fea_single = torch.cat([dist_top.view(-1, 1), cart_coord.view(1, 3).expand(len(index_top), 3)], dim=-1)
553
+ edge_fea_single = torch.cat([edge_fea_single, nn_coords[index_top], cart_coords[nn_indices[index_top]]], dim=-1)
554
+ edge_fea.append(edge_fea_single)
555
+ else:
556
+ warnings.warn("Can not find a number of max_num_nbr atoms within radius")
557
+ edge_idx.extend(nn_indices)
558
+ if if_lcmp_graph:
559
+ atom_idx_connect.append(nn_indices)
560
+ edge_idx_first.extend([index_first] * len(nn_indices))
561
+ edge_fea_single = torch.cat([dist.view(-1, 1), cart_coord.view(1, 3).expand(len(nn_indices), 3)], dim=-1)
562
+ edge_fea_single = torch.cat([edge_fea_single, nn_coords, cart_coords[nn_indices]], dim=-1)
563
+ edge_fea.append(edge_fea_single)
564
+ else:
565
+ index_top = dist.lt(r + numerical_tol)
566
+ edge_idx.extend(nn_indices[index_top])
567
+ if if_lcmp_graph:
568
+ atom_idx_connect.append(nn_indices[index_top])
569
+ edge_idx_first.extend([index_first] * len(nn_indices[index_top]))
570
+ edge_fea_single = torch.cat([dist[index_top].view(-1, 1), cart_coord.view(1, 3).expand(len(nn_indices[index_top]), 3)], dim=-1)
571
+ edge_fea_single = torch.cat([edge_fea_single, nn_coords[index_top], cart_coords[nn_indices[index_top]]], dim=-1)
572
+ edge_fea.append(edge_fea_single)
573
+ if if_lcmp_graph:
574
+ edge_idx_connect.append(range(edge_idx_connect_cursor, edge_idx_connect_cursor + len(atom_idx_connect[-1])))
575
+ edge_idx_connect_cursor += len(atom_idx_connect[-1])
576
+
577
+
578
+ edge_fea = torch.cat(edge_fea).type(default_dtype_torch)
579
+ edge_idx_first = torch.LongTensor(edge_idx_first)
580
+ edge_idx = torch.stack([edge_idx_first, torch.LongTensor(edge_idx)])
581
+
582
+
583
+ if tb_folder is not None:
584
+ if target == 'E_ij':
585
+ read_file_list = ['E_ij.h5', 'E_delta_ee_ij.h5', 'E_xc_ij.h5']
586
+ graph_key_list = ['E_ij', 'E_delta_ee_ij', 'E_xc_ij']
587
+ read_terms_dict = {}
588
+ for read_file, graph_key in zip(read_file_list, graph_key_list):
589
+ read_terms = {}
590
+ fid = h5py.File(os.path.join(tb_folder, read_file), 'r')
591
+ for k, v in fid.items():
592
+ key = json.loads(k)
593
+ key = (key[0], key[1], key[2], key[3] - 1, key[4] - 1)
594
+ read_terms[key] = torch.tensor(v[...], dtype=default_dtype_torch)
595
+ read_terms_dict[graph_key] = read_terms
596
+ fid.close()
597
+
598
+ local_rotation_dict = {}
599
+ if if_require_grad:
600
+ fid = fid_rc
601
+ else:
602
+ fid = h5py.File(os.path.join(tb_folder, 'rc.h5'), 'r')
603
+ for k, v in fid.items():
604
+ key = json.loads(k)
605
+ key = (key[0], key[1], key[2], key[3] - 1, key[4] - 1) # (R, i, j) i and j is 0-based index
606
+ if if_require_grad:
607
+ local_rotation_dict[key] = v
608
+ else:
609
+ local_rotation_dict[key] = torch.tensor(v, dtype=default_dtype_torch)
610
+ if not if_require_grad:
611
+ fid.close()
612
+ elif target == 'E_i':
613
+ read_file_list = ['E_i.h5']
614
+ graph_key_list = ['E_i']
615
+ read_terms_dict = {}
616
+ for read_file, graph_key in zip(read_file_list, graph_key_list):
617
+ read_terms = {}
618
+ fid = h5py.File(os.path.join(tb_folder, read_file), 'r')
619
+ for k, v in fid.items():
620
+ index_i = int(k) # index_i is 0-based index
621
+ read_terms[index_i] = torch.tensor(v[...], dtype=default_dtype_torch)
622
+ fid.close()
623
+ read_terms_dict[graph_key] = read_terms
624
+ else:
625
+ if interface == 'h5' or interface == 'h5_rc_only':
626
+ atom_num_orbital = load_orbital_types(os.path.join(tb_folder, 'orbital_types.dat'))
627
+
628
+ if interface == 'h5':
629
+ with open(os.path.join(tb_folder, 'info.json'), 'r') as info_f:
630
+ info_dict = json.load(info_f)
631
+ spinful = info_dict["isspinful"]
632
+
633
+ if interface == 'h5':
634
+ if target == 'hamiltonian':
635
+ read_file_list = ['rh.h5']
636
+ graph_key_list = ['term_real']
637
+ elif target == 'phiVdphi':
638
+ read_file_list = ['rphiVdphi.h5']
639
+ graph_key_list = ['term_real']
640
+ elif target == 'density_matrix':
641
+ read_file_list = ['rdm.h5']
642
+ graph_key_list = ['term_real']
643
+ elif target == 'O_ij':
644
+ read_file_list = ['rh.h5', 'rdm.h5', 'rvna.h5', 'rvdee.h5', 'rvxc.h5']
645
+ graph_key_list = ['rh', 'rdm', 'rvna', 'rvdee', 'rvxc']
646
+ else:
647
+ raise ValueError('Unknown prediction target: {}'.format(target))
648
+ read_terms_dict = {}
649
+ for read_file, graph_key in zip(read_file_list, graph_key_list):
650
+ read_terms = {}
651
+ fid = h5py.File(os.path.join(tb_folder, read_file), 'r')
652
+ for k, v in fid.items():
653
+ key = json.loads(k)
654
+ key = (key[0], key[1], key[2], key[3] - 1, key[4] - 1)
655
+ if spinful:
656
+ num_orbital_row = atom_num_orbital[key[3]]
657
+ num_orbital_column = atom_num_orbital[key[4]]
658
+ # soc block order:
659
+ # 1 3
660
+ # 4 2
661
+ if target == 'phiVdphi':
662
+ raise NotImplementedError
663
+ else:
664
+ read_value = torch.stack([
665
+ torch.tensor(v[:num_orbital_row, :num_orbital_column].real, dtype=default_dtype_torch),
666
+ torch.tensor(v[:num_orbital_row, :num_orbital_column].imag, dtype=default_dtype_torch),
667
+ torch.tensor(v[num_orbital_row:, num_orbital_column:].real, dtype=default_dtype_torch),
668
+ torch.tensor(v[num_orbital_row:, num_orbital_column:].imag, dtype=default_dtype_torch),
669
+ torch.tensor(v[:num_orbital_row, num_orbital_column:].real, dtype=default_dtype_torch),
670
+ torch.tensor(v[:num_orbital_row, num_orbital_column:].imag, dtype=default_dtype_torch),
671
+ torch.tensor(v[num_orbital_row:, :num_orbital_column].real, dtype=default_dtype_torch),
672
+ torch.tensor(v[num_orbital_row:, :num_orbital_column].imag, dtype=default_dtype_torch)
673
+ ], dim=-1)
674
+ read_terms[key] = read_value
675
+ else:
676
+ read_terms[key] = torch.tensor(v[...], dtype=default_dtype_torch)
677
+ read_terms_dict[graph_key] = read_terms
678
+ fid.close()
679
+
680
+ local_rotation_dict = {}
681
+ if if_require_grad:
682
+ fid = fid_rc
683
+ else:
684
+ fid = h5py.File(os.path.join(tb_folder, 'rc.h5'), 'r')
685
+ for k, v in fid.items():
686
+ key = json.loads(k)
687
+ key = (key[0], key[1], key[2], key[3] - 1, key[4] - 1) # (R, i, j) i and j is 0-based index
688
+ if if_require_grad:
689
+ local_rotation_dict[key] = v
690
+ else:
691
+ local_rotation_dict[key] = torch.tensor(v[...], dtype=default_dtype_torch)
692
+ if not if_require_grad:
693
+ fid.close()
694
+
695
+ max_num_orbital = max(atom_num_orbital)
696
+
697
+ elif interface == 'npz' or interface == 'npz_rc_only':
698
+ spinful = False
699
+ atom_num_orbital = load_orbital_types(os.path.join(tb_folder, 'orbital_types.dat'))
700
+
701
+ if interface == 'npz':
702
+ graph_key_list = ['term_real']
703
+ read_terms_dict = {'term_real': {}}
704
+ hopping_dict_read = np.load(os.path.join(tb_folder, 'rh.npz'))
705
+ for k, v in hopping_dict_read.items():
706
+ key = json.loads(k)
707
+ key = (key[0], key[1], key[2], key[3] - 1, key[4] - 1) # (R, i, j) i and j is 0-based index
708
+ read_terms_dict['term_real'][key] = torch.tensor(v, dtype=default_dtype_torch)
709
+
710
+ local_rotation_dict = {}
711
+ local_rotation_dict_read = np.load(os.path.join(tb_folder, 'rc.npz'))
712
+ for k, v in local_rotation_dict_read.items():
713
+ key = json.loads(k)
714
+ key = (key[0], key[1], key[2], key[3] - 1, key[4] - 1)
715
+ local_rotation_dict[key] = torch.tensor(v, dtype=default_dtype_torch)
716
+
717
+ max_num_orbital = max(atom_num_orbital)
718
+ else:
719
+ raise ValueError(f'Unknown interface: {interface}')
720
+
721
+ if target == 'E_i':
722
+ term_dict = {}
723
+ onsite_term_dict = {}
724
+ for graph_key in graph_key_list:
725
+ term_dict[graph_key] = torch.full([numbers.shape[0], 1], np.nan, dtype=default_dtype_torch)
726
+ for index_atom in range(numbers.shape[0]):
727
+ assert index_atom in read_terms_dict[graph_key_list[0]]
728
+ for graph_key in graph_key_list:
729
+ term_dict[graph_key][index_atom] = read_terms_dict[graph_key][index_atom]
730
+ subgraph = None
731
+ else:
732
+ if interface == 'h5_rc_only' or interface == 'npz_rc_only':
733
+ local_rotation = []
734
+ else:
735
+ term_dict = {}
736
+ onsite_term_dict = {}
737
+ if target == 'E_ij':
738
+ for graph_key in graph_key_list:
739
+ term_dict[graph_key] = torch.full([edge_fea.shape[0], 1], np.nan, dtype=default_dtype_torch)
740
+ local_rotation = []
741
+ if separate_onsite is True:
742
+ for graph_key in graph_key_list:
743
+ onsite_term_dict['onsite_' + graph_key] = torch.full([numbers.shape[0], 1], np.nan, dtype=default_dtype_torch)
744
+ else:
745
+ term_mask = torch.zeros(edge_fea.shape[0], dtype=torch.bool)
746
+ for graph_key in graph_key_list:
747
+ if spinful:
748
+ term_dict[graph_key] = torch.full([edge_fea.shape[0], max_num_orbital, max_num_orbital, 8],
749
+ np.nan, dtype=default_dtype_torch)
750
+ else:
751
+ if target == 'phiVdphi':
752
+ term_dict[graph_key] = torch.full([edge_fea.shape[0], max_num_orbital, max_num_orbital, 3],
753
+ np.nan, dtype=default_dtype_torch)
754
+ else:
755
+ term_dict[graph_key] = torch.full([edge_fea.shape[0], max_num_orbital, max_num_orbital],
756
+ np.nan, dtype=default_dtype_torch)
757
+ local_rotation = []
758
+ if separate_onsite is True:
759
+ for graph_key in graph_key_list:
760
+ if spinful:
761
+ onsite_term_dict['onsite_' + graph_key] = torch.full(
762
+ [numbers.shape[0], max_num_orbital, max_num_orbital, 8],
763
+ np.nan, dtype=default_dtype_torch)
764
+ else:
765
+ if target == 'phiVdphi':
766
+ onsite_term_dict['onsite_' + graph_key] = torch.full(
767
+ [numbers.shape[0], max_num_orbital, max_num_orbital, 3],
768
+ np.nan, dtype=default_dtype_torch)
769
+ else:
770
+ onsite_term_dict['onsite_' + graph_key] = torch.full(
771
+ [numbers.shape[0], max_num_orbital, max_num_orbital],
772
+ np.nan, dtype=default_dtype_torch)
773
+
774
+ inv_lattice = torch.inverse(lattice).type(default_dtype_torch)
775
+ for index_edge in range(edge_fea.shape[0]):
776
+ # h_{i0, jR} i and j is 0-based index
777
+ R = torch.round(edge_fea[index_edge, 4:7].cpu() @ inv_lattice - edge_fea[index_edge, 7:10].cpu() @ inv_lattice).int().tolist()
778
+ i, j = edge_idx[:, index_edge]
779
+
780
+ key_term = (*R, i.item(), j.item())
781
+ if interface == 'h5_rc_only' or interface == 'npz_rc_only':
782
+ local_rotation.append(local_rotation_dict[key_term])
783
+ else:
784
+ if key_term in read_terms_dict[graph_key_list[0]]:
785
+ for graph_key in graph_key_list:
786
+ if target == 'E_ij':
787
+ term_dict[graph_key][index_edge] = read_terms_dict[graph_key][key_term]
788
+ else:
789
+ term_mask[index_edge] = True
790
+ if spinful:
791
+ term_dict[graph_key][index_edge, :atom_num_orbital[i], :atom_num_orbital[j], :] = read_terms_dict[graph_key][key_term]
792
+ else:
793
+ term_dict[graph_key][index_edge, :atom_num_orbital[i], :atom_num_orbital[j]] = read_terms_dict[graph_key][key_term]
794
+ local_rotation.append(local_rotation_dict[key_term])
795
+ else:
796
+ raise NotImplementedError(
797
+ "Not yet have support for graph radius including hopping without calculation")
798
+
799
+ if separate_onsite is True and interface != 'h5_rc_only' and interface != 'npz_rc_only':
800
+ for index_atom in range(numbers.shape[0]):
801
+ key_term = (0, 0, 0, index_atom, index_atom)
802
+ assert key_term in read_terms_dict[graph_key_list[0]]
803
+ for graph_key in graph_key_list:
804
+ if target == 'E_ij':
805
+ onsite_term_dict['onsite_' + graph_key][index_atom] = read_terms_dict[graph_key][key_term]
806
+ else:
807
+ if spinful:
808
+ onsite_term_dict['onsite_' + graph_key][index_atom, :atom_num_orbital[i], :atom_num_orbital[j], :] = \
809
+ read_terms_dict[graph_key][key_term]
810
+ else:
811
+ onsite_term_dict['onsite_' + graph_key][index_atom, :atom_num_orbital[i], :atom_num_orbital[j]] = \
812
+ read_terms_dict[graph_key][key_term]
813
+
814
+ if if_lcmp_graph:
815
+ local_rotation = torch.stack(local_rotation, dim=0)
816
+ assert local_rotation.shape[0] == edge_fea.shape[0]
817
+ r_vec = edge_fea[:, 1:4] - edge_fea[:, 4:7]
818
+ r_vec = r_vec.unsqueeze(1)
819
+ if huge_structure is False:
820
+ r_vec = torch.matmul(r_vec[:, None, :, :], local_rotation[None, :, :, :].to(r_vec.device)).reshape(-1, 3)
821
+ if if_new_sp:
822
+ r_vec = torch.nn.functional.normalize(r_vec, dim=-1)
823
+ angular_expansion = _spherical_harmonics(num_l - 1, -r_vec[..., 2], r_vec[..., 0],
824
+ r_vec[..., 1])
825
+ angular_expansion.mul_(torch.cat([
826
+ (math.sqrt(2 * l + 1) / math.sqrt(4 * math.pi)) * torch.ones(2 * l + 1,
827
+ dtype=angular_expansion.dtype,
828
+ device=angular_expansion.device)
829
+ for l in range(num_l)
830
+ ]))
831
+ angular_expansion = angular_expansion.reshape(edge_fea.shape[0], edge_fea.shape[0], -1)
832
+ else:
833
+ r_vec_sp = get_spherical_from_cartesian(r_vec)
834
+ sph_harm_func = SphericalHarmonics()
835
+ angular_expansion = []
836
+ for l in range(num_l):
837
+ angular_expansion.append(sph_harm_func.get(l, r_vec_sp[:, 0], r_vec_sp[:, 1]))
838
+ angular_expansion = torch.cat(angular_expansion, dim=-1).reshape(edge_fea.shape[0], edge_fea.shape[0], -1)
839
+
840
+ subgraph_atom_idx_list = []
841
+ subgraph_edge_idx_list = []
842
+ subgraph_edge_ang_list = []
843
+ subgraph_index = []
844
+ index_cursor = 0
845
+
846
+ for index in range(edge_fea.shape[0]):
847
+ # h_{i0, jR}
848
+ i, j = edge_idx[:, index]
849
+ subgraph_atom_idx = torch.stack([i.repeat(len(atom_idx_connect[i])), atom_idx_connect[i]]).T
850
+ subgraph_edge_idx = torch.LongTensor(edge_idx_connect[i])
851
+ if huge_structure:
852
+ r_vec_tmp = torch.matmul(r_vec[subgraph_edge_idx, :, :], local_rotation[index, :, :].to(r_vec.device)).reshape(-1, 3)
853
+ if if_new_sp:
854
+ r_vec_tmp = torch.nn.functional.normalize(r_vec_tmp, dim=-1)
855
+ subgraph_edge_ang = _spherical_harmonics(num_l - 1, -r_vec_tmp[..., 2], r_vec_tmp[..., 0], r_vec_tmp[..., 1])
856
+ subgraph_edge_ang.mul_(torch.cat([
857
+ (math.sqrt(2 * l + 1) / math.sqrt(4 * math.pi)) * torch.ones(2 * l + 1,
858
+ dtype=subgraph_edge_ang.dtype,
859
+ device=subgraph_edge_ang.device)
860
+ for l in range(num_l)
861
+ ]))
862
+ else:
863
+ r_vec_sp = get_spherical_from_cartesian(r_vec_tmp)
864
+ sph_harm_func = SphericalHarmonics()
865
+ angular_expansion = []
866
+ for l in range(num_l):
867
+ angular_expansion.append(sph_harm_func.get(l, r_vec_sp[:, 0], r_vec_sp[:, 1]))
868
+ subgraph_edge_ang = torch.cat(angular_expansion, dim=-1).reshape(-1, num_l ** 2)
869
+ else:
870
+ subgraph_edge_ang = angular_expansion[subgraph_edge_idx, index, :]
871
+
872
+ subgraph_atom_idx_list.append(subgraph_atom_idx)
873
+ subgraph_edge_idx_list.append(subgraph_edge_idx)
874
+ subgraph_edge_ang_list.append(subgraph_edge_ang)
875
+ subgraph_index += [index_cursor] * len(atom_idx_connect[i])
876
+ index_cursor += 1
877
+
878
+ subgraph_atom_idx = torch.stack([j.repeat(len(atom_idx_connect[j])), atom_idx_connect[j]]).T
879
+ subgraph_edge_idx = torch.LongTensor(edge_idx_connect[j])
880
+ if huge_structure:
881
+ r_vec_tmp = torch.matmul(r_vec[subgraph_edge_idx, :, :], local_rotation[index, :, :].to(r_vec.device)).reshape(-1, 3)
882
+ if if_new_sp:
883
+ r_vec_tmp = torch.nn.functional.normalize(r_vec_tmp, dim=-1)
884
+ subgraph_edge_ang = _spherical_harmonics(num_l - 1, -r_vec_tmp[..., 2], r_vec_tmp[..., 0], r_vec_tmp[..., 1])
885
+ subgraph_edge_ang.mul_(torch.cat([
886
+ (math.sqrt(2 * l + 1) / math.sqrt(4 * math.pi)) * torch.ones(2 * l + 1,
887
+ dtype=subgraph_edge_ang.dtype,
888
+ device=subgraph_edge_ang.device)
889
+ for l in range(num_l)
890
+ ]))
891
+ else:
892
+ r_vec_sp = get_spherical_from_cartesian(r_vec_tmp)
893
+ sph_harm_func = SphericalHarmonics()
894
+ angular_expansion = []
895
+ for l in range(num_l):
896
+ angular_expansion.append(sph_harm_func.get(l, r_vec_sp[:, 0], r_vec_sp[:, 1]))
897
+ subgraph_edge_ang = torch.cat(angular_expansion, dim=-1).reshape(-1, num_l ** 2)
898
+ else:
899
+ subgraph_edge_ang = angular_expansion[subgraph_edge_idx, index, :]
900
+ subgraph_atom_idx_list.append(subgraph_atom_idx)
901
+ subgraph_edge_idx_list.append(subgraph_edge_idx)
902
+ subgraph_edge_ang_list.append(subgraph_edge_ang)
903
+ subgraph_index += [index_cursor] * len(atom_idx_connect[j])
904
+ index_cursor += 1
905
+ subgraph = {"subgraph_atom_idx":torch.cat(subgraph_atom_idx_list, dim=0),
906
+ "subgraph_edge_idx":torch.cat(subgraph_edge_idx_list, dim=0),
907
+ "subgraph_edge_ang":torch.cat(subgraph_edge_ang_list, dim=0),
908
+ "subgraph_index":torch.LongTensor(subgraph_index)}
909
+ else:
910
+ subgraph = None
911
+
912
+ if interface == 'h5_rc_only' or interface == 'npz_rc_only':
913
+ data = Data(x=numbers, edge_index=edge_idx, edge_attr=edge_fea, stru_id=stru_id, term_mask=None,
914
+ term_real=None, onsite_term_real=None,
915
+ atom_num_orbital=torch.tensor(atom_num_orbital),
916
+ subgraph_dict=subgraph,
917
+ **kwargs)
918
+ else:
919
+ if target == 'E_ij' or target == 'E_i':
920
+ data = Data(x=numbers, edge_index=edge_idx, edge_attr=edge_fea, stru_id=stru_id,
921
+ **term_dict, **onsite_term_dict,
922
+ subgraph_dict=subgraph,
923
+ spinful=False,
924
+ **kwargs)
925
+ else:
926
+ data = Data(x=numbers, edge_index=edge_idx, edge_attr=edge_fea, stru_id=stru_id, term_mask=term_mask,
927
+ **term_dict, **onsite_term_dict,
928
+ atom_num_orbital=torch.tensor(atom_num_orbital),
929
+ subgraph_dict=subgraph,
930
+ spinful=spinful,
931
+ **kwargs)
932
+ else:
933
+ data = Data(x=numbers, edge_index=edge_idx, edge_attr=edge_fea, stru_id=stru_id, **kwargs)
934
+ return data
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/inference/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .pred_ham import predict, predict_with_grad
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/inference/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (230 Bytes). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/inference/__pycache__/pred_ham.cpython-312.pyc ADDED
Binary file (28.8 kB). View file
 
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/inference/band_config.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "calc_job": "band",
3
+ "which_k": 0,
4
+ "fermi_level": -3.82373,
5
+ "max_iter": 300,
6
+ "num_band": 50,
7
+ "k_data": ["15 0 0 0 0.5 0.5 0 Γ M", "15 0.5 0.5 0 0.3333333333333333 0.6666666666666667 0 M K", "15 0.3333333333333333 0.6666666666666667 0 0 0 0 K Γ"]
8
+ }
2_training/hamiltonian/infer_sc/dataset/00/pred_ham_std/src/deeph/inference/dense_calc.jl ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ using DelimitedFiles, LinearAlgebra, JSON
2
+ using HDF5
3
+ using ArgParse
4
+ using SparseArrays
5
+ using Arpack
6
+ using JLD
7
+ # BLAS.set_num_threads(1)
8
+
9
+ const ev2Hartree = 0.036749324533634074
10
+ const Bohr2Ang = 0.529177249
11
+ const default_dtype = Complex{Float64}
12
+
13
+
14
+ function parse_commandline()
15
+ s = ArgParseSettings()
16
+ @add_arg_table! s begin
17
+ "--input_dir", "-i"
18
+ help = "path of rlat.dat, orbital_types.dat, site_positions.dat, hamiltonians_pred.h5, and overlaps.h5"
19
+ arg_type = String
20
+ default = "./"
21
+ "--output_dir", "-o"
22
+ help = "path of output openmx.Band"
23
+ arg_type = String
24
+ default = "./"
25
+ "--config"
26
+ help = "config file in the format of JSON"
27
+ arg_type = String
28
+ "--ill_project"
29
+ help = "projects out the eigenvectors of the overlap matrix that correspond to eigenvalues smaller than ill_threshold"
30
+ arg_type = Bool
31
+ default = true
32
+ "--ill_threshold"
33
+ help = "threshold for ill_project"
34
+ arg_type = Float64
35
+ default = 5e-4
36
+ end
37
+ return parse_args(s)
38
+ end
39
+
40
+
41
+ function _create_dict_h5(filename::String)
42
+ fid = h5open(filename, "r")
43
+ T = eltype(fid[keys(fid)[1]])
44
+ d_out = Dict{Array{Int64,1}, Array{T, 2}}()
45
+ for key in keys(fid)
46
+ data = read(fid[key])
47
+ nk = map(x -> parse(Int64, convert(String, x)), split(key[2 : length(key) - 1], ','))
48
+ d_out[nk] = permutedims(data)
49
+ end
50
+ close(fid)
51
+ return d_out
52
+ end
53
+
54
+
55
+ function genlist(x)
56
+ return collect(range(x[1], stop = x[2], length = Int64(x[3])))
57
+ end
58
+
59
+
60
+ function k_data2num_ks(kdata::AbstractString)
61
+ return parse(Int64,split(kdata)[1])
62
+ end
63
+
64
+
65
+ function k_data2kpath(kdata::AbstractString)
66
+ return map(x->parse(Float64,x), split(kdata)[2:7])
67
+ end
68
+
69
+
70
+ function std_out_array(a::AbstractArray)
71
+ return string(map(x->string(x," "),a)...)
72
+ end
73
+
74
+
75
+ function main()
76
+ parsed_args = parse_commandline()
77
+
78
+ println(parsed_args["config"])
79
+ config = JSON.parsefile(parsed_args["config"])
80
+ calc_job = config["calc_job"]
81
+
82
+ if isfile(joinpath(parsed_args["input_dir"],"info.json"))
83
+ spinful = JSON.parsefile(joinpath(parsed_args["input_dir"],"info.json"))["isspinful"]
84
+ else
85
+ spinful = false
86
+ end
87
+
88
+ site_positions = readdlm(joinpath(parsed_args["input_dir"], "site_positions.dat"))
89
+ nsites = size(site_positions, 2)
90
+
91
+ orbital_types_f = open(joinpath(parsed_args["input_dir"], "orbital_types.dat"), "r")
92
+ site_norbits = zeros(nsites)
93
+ orbital_types = Vector{Vector{Int64}}()
94
+ for index_site = 1:nsites
95
+ orbital_type = parse.(Int64, split(readline(orbital_types_f)))
96
+ push!(orbital_types, orbital_type)
97
+ end
98
+ site_norbits = (x->sum(x .* 2 .+ 1)).(orbital_types) * (1 + spinful)
99
+ norbits = sum(site_norbits)
100
+ site_norbits_cumsum = cumsum(site_norbits)
101
+
102
+ rlat = readdlm(joinpath(parsed_args["input_dir"], "rlat.dat"))
103
+
104
+
105
+ @info "read h5"
106
+ begin_time = time()
107
+ hamiltonians_pred = _create_dict_h5(joinpath(parsed_args["input_dir"], "hamiltonians_pred.h5"))
108
+ overlaps = _create_dict_h5(joinpath(parsed_args["input_dir"], "overlaps.h5"))
109
+ println("Time for reading h5: ", time() - begin_time, "s")
110
+
111
+ H_R = Dict{Vector{Int64}, Matrix{default_dtype}}()
112
+ S_R = Dict{Vector{Int64}, Matrix{default_dtype}}()
113
+
114
+ @info "construct Hamiltonian and overlap matrix in the real space"
115
+ begin_time = time()
116
+ for key in collect(keys(hamiltonians_pred))
117
+ hamiltonian_pred = hamiltonians_pred[key]
118
+ if (key ∈ keys(overlaps))
119
+ overlap = overlaps[key]
120
+ else
121
+ # continue
122
+ overlap = zero(hamiltonian_pred)
123
+ end
124
+ if spinful
125
+ overlap = vcat(hcat(overlap,zeros(size(overlap))),hcat(zeros(size(overlap)),overlap)) # the readout overlap matrix only contains the upper-left block # TODO maybe drop the zeros?
126
+ end
127
+ R = key[1:3]; atom_i=key[4]; atom_j=key[5]
128
+
129
+ @assert (site_norbits[atom_i], site_norbits[atom_j]) == size(hamiltonian_pred)
130
+ @assert (site_norbits[atom_i], site_norbits[atom_j]) == size(overlap)
131
+ if !(R ∈ keys(H_R))
132
+ H_R[R] = zeros(default_dtype, norbits, norbits)
133
+ S_R[R] = zeros(default_dtype, norbits, norbits)
134
+ end
135
+ for block_matrix_i in 1:site_norbits[atom_i]
136
+ for block_matrix_j in 1:site_norbits[atom_j]
137
+ index_i = site_norbits_cumsum[atom_i] - site_norbits[atom_i] + block_matrix_i
138
+ index_j = site_norbits_cumsum[atom_j] - site_norbits[atom_j] + block_matrix_j
139
+ H_R[R][index_i, index_j] = hamiltonian_pred[block_matrix_i, block_matrix_j]
140
+ S_R[R][index_i, index_j] = overlap[block_matrix_i, block_matrix_j]
141
+ end
142
+ end
143
+ end
144
+ println("Time for constructing Hamiltonian and overlap matrix in the real space: ", time() - begin_time, " s")
145
+
146
+
147
+ if calc_job == "band"
148
+ fermi_level = config["fermi_level"]
149
+ k_data = config["k_data"]
150
+
151
+ ill_project = parsed_args["ill_project"] || ("ill_project" in keys(config) && config["ill_project"])
152
+ ill_threshold = max(parsed_args["ill_threshold"], get(config, "ill_threshold", 0.))
153
+
154
+ @info "calculate bands"
155
+ num_ks = k_data2num_ks.(k_data)
156
+ kpaths = k_data2kpath.(k_data)
157
+
158
+ egvals = zeros(Float64, norbits, sum(num_ks)[1])
159
+
160
+ begin_time = time()
161
+ idx_k = 1
162
+ for i = 1:size(kpaths, 1)
163
+ kpath = kpaths[i]
164
+ pnkpts = num_ks[i]
165
+ kxs = LinRange(kpath[1], kpath[4], pnkpts)
166
+ kys = LinRange(kpath[2], kpath[5], pnkpts)
167
+ kzs = LinRange(kpath[3], kpath[6], pnkpts)
168
+ for (kx, ky, kz) in zip(kxs, kys, kzs)
169
+ idx_k
170
+ H_k = zeros(default_dtype, norbits, norbits)
171
+ S_k = zeros(default_dtype, norbits, norbits)
172
+ for R in keys(H_R)
173
+ H_k += H_R[R] * exp(im*2π*([kx, ky, kz]⋅R))
174
+ S_k += S_R[R] * exp(im*2π*([kx, ky, kz]⋅R))
175
+ end
176
+ S_k = (S_k + S_k') / 2
177
+ H_k = (H_k + H_k') / 2
178
+ if ill_project
179
+ (egval_S, egvec_S) = eigen(Hermitian(S_k))
180
+ # egvec_S: shape (num_basis, num_bands)
181
+ project_index = abs.(egval_S) .> ill_threshold
182
+ if sum(project_index) != length(project_index)
183
+ # egval_S = egval_S[project_index]
184
+ egvec_S = egvec_S[:, project_index]
185
+ @warn "ill-conditioned eigenvalues detected, projected out $(length(project_index) - sum(project_index)) eigenvalues"
186
+ H_k = egvec_S' * H_k * egvec_S
187
+ S_k = egvec_S' * S_k * egvec_S
188
+ (egval, egvec) = eigen(Hermitian(H_k), Hermitian(S_k))
189
+ egval = vcat(egval, fill(1e4, length(project_index) - sum(project_index)))
190
+ egvec = egvec_S * egvec
191
+ else
192
+ (egval, egvec) = eigen(Hermitian(H_k), Hermitian(S_k))
193
+ end
194
+ else
195
+ (egval, egvec) = eigen(Hermitian(H_k), Hermitian(S_k))
196
+ end
197
+ egvals[:, idx_k] = egval
198
+ println("Time for solving No.$idx_k eigenvalues at k = ", [kx, ky, kz], ": ", time() - begin_time, " s")
199
+ idx_k += 1
200
+ end
201
+ end
202
+
203
+ # output in openmx band format
204
+ f = open(joinpath(parsed_args["output_dir"], "openmx.Band"),"w")
205
+ println(f, norbits, " ", 0, " ", ev2Hartree * fermi_level)
206
+ openmx_rlat = reshape((rlat .* Bohr2Ang), 1, :)
207
+ println(f, std_out_array(openmx_rlat))
208
+ println(f, length(k_data))
209
+ for line in k_data
210
+ println(f,line)
211
+ end
212
+ idx_k = 1
213
+ for i = 1:size(kpaths, 1)
214
+ pnkpts = num_ks[i]
215
+ kstart = kpaths[i][1:3]
216
+ kend = kpaths[i][4:6]
217
+ k_list = zeros(Float64,pnkpts,3)
218
+ for alpha = 1:3
219
+ k_list[:,alpha] = genlist([kstart[alpha],kend[alpha],pnkpts])
220
+ end
221
+ for j = 1:pnkpts
222
+ idx_k
223
+ kvec = k_list[j,:]
224
+ println(f, norbits, " ", std_out_array(kvec))
225
+ println(f, std_out_array(ev2Hartree * egvals[:, idx_k]))
226
+ idx_k += 1
227
+ end
228
+ end
229
+ close(f)
230
+ end
231
+ end
232
+
233
+
234
+ main()