Koulb committed
Commit e12372e (verified)
Parent: 423c9c9

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. .gitattributes +1 -0
  2. 2_training/forces/_phonopy_freqs.json +1 -0
  3. 2_training/forces/diamond_ase_ref.nequip.pt2 +3 -0
  4. 2_training/forces/phonon_comparison.png +3 -0
  5. 2_training/forces/train_force.log +38 -0
  6. 2_training/hamiltonian/infer_sc/dataset/00/overlaps.h5 +3 -0
  7. 2_training/hamiltonian/infer_uc/dataset/00/element.dat +2 -0
  8. 2_training/hamiltonian/infer_uc/dataset/00/graph.pkl +3 -0
  9. 2_training/hamiltonian/infer_uc/dataset/00/hamiltonians.h5 +3 -0
  10. 2_training/hamiltonian/infer_uc/dataset/00/hamiltonians_pred.h5 +3 -0
  11. 2_training/hamiltonian/infer_uc/dataset/00/info.json +1 -0
  12. 2_training/hamiltonian/infer_uc/dataset/00/lat.dat +3 -0
  13. 2_training/hamiltonian/infer_uc/dataset/00/orbital_types.dat +2 -0
  14. 2_training/hamiltonian/infer_uc/dataset/00/overlaps.h5 +3 -0
  15. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/config.ini +82 -0
  16. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/result.txt +86 -0
  17. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__init__.py +10 -0
  18. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/__init__.cpython-312.pyc +0 -0
  19. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/data.cpython-312.pyc +0 -0
  20. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/graph.cpython-312.pyc +0 -0
  21. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/kernel.cpython-312.pyc +0 -0
  22. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/model.cpython-312.pyc +0 -0
  23. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/rotate.cpython-312.pyc +0 -0
  24. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/utils.cpython-312.pyc +0 -0
  25. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/data.py +217 -0
  26. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/default.ini +88 -0
  27. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_HermNet/__init__.py +1 -0
  28. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_HermNet/__pycache__/__init__.cpython-312.pyc +0 -0
  29. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_HermNet/__pycache__/rmnet.cpython-312.pyc +0 -0
  30. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_HermNet/license.txt +1 -0
  31. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_HermNet/rmnet.py +105 -0
  32. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__init__.py +2 -0
  33. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__pycache__/__init__.cpython-312.pyc +0 -0
  34. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__pycache__/diff_group_norm.cpython-312.pyc +0 -0
  35. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__pycache__/graph_norm.cpython-312.pyc +0 -0
  36. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/diff_group_norm.py +109 -0
  37. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/graph_norm.py +60 -0
  38. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/license.txt +22 -0
  39. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/__init__.py +1 -0
  40. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/__pycache__/__init__.cpython-312.pyc +0 -0
  41. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/__pycache__/lattice.cpython-312.pyc +0 -0
  42. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/lattice.py +71 -0
  43. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/license.txt +22 -0
  44. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/__init__.py +1 -0
  45. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/__pycache__/__init__.cpython-312.pyc +0 -0
  46. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/__pycache__/acsf.cpython-312.pyc +0 -0
  47. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/acsf.py +50 -0
  48. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/license.txt +35 -0
  49. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/__init__.py +1 -0
  50. 2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/__pycache__/__init__.cpython-312.pyc +0 -0
.gitattributes CHANGED
@@ -230,3 +230,4 @@ aobasis/siesta.DM filter=lfs diff=lfs merge=lfs -text
  1_data_prepare/data/disp-14/scf/diamond.save/charge-density.dat filter=lfs diff=lfs merge=lfs -text
  1_data_prepare/data/disp-07/scf/VSC filter=lfs diff=lfs merge=lfs -text
  1_data_prepare/data/disp-07/scf/diamond.save/charge-density.dat filter=lfs diff=lfs merge=lfs -text
+ 2_training/forces/diamond_ase_ref.nequip.pt2 filter=lfs diff=lfs merge=lfs -text
2_training/forces/_phonopy_freqs.json ADDED
@@ -0,0 +1 @@
+ {"freqs_thz": [-6.176924148037084e-06, -6.167703510703962e-06, -6.1374103546125556e-06, 38.86256568564987, 38.86256568564988, 38.86256568564988]}
2_training/forces/diamond_ase_ref.nequip.pt2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05a8ffd2d4feb583ae3c0f31b67e811967895e9b3390058a7c1a653e229bc8cd
+ size 10289338
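This is a Git LFS pointer, not the checkpoint itself; the binary is resolved from LFS storage by its SHA-256 oid. A sketch (file name taken from this commit) for verifying a downloaded copy against the pointer:

    import hashlib, os

    def verify_lfs_pointer(path, oid, size):
        # hash in 1 MiB chunks so large checkpoints need not fit in memory
        h = hashlib.sha256()
        with open(path, "rb") as fh:
            for chunk in iter(lambda: fh.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == oid and os.path.getsize(path) == size

    print(verify_lfs_pointer(
        "diamond_ase_ref.nequip.pt2",
        "05a8ffd2d4feb583ae3c0f31b67e811967895e9b3390058a7c1a653e229bc8cd",
        10289338))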
2_training/forces/phonon_comparison.png ADDED

Git LFS Details

  • SHA256: fcf45a338a03b006c6d895f7bd9ea3a666b8de3b327dd74930b819689b08816d
  • Pointer size: 130 Bytes
  • Size of remote file: 32.9 kB
2_training/forces/train_force.log ADDED
@@ -0,0 +1,38 @@
+ Step 1: Building forces dataset...
+ Dataset exists: /home/apolyukhin/Development/epc_ml/example/diamond/2_training/forces/dataset.xyz
+
+ Step 2: Running DFPT at q=Gamma...
+ DFPT output exists: /home/apolyukhin/Development/epc_ml/example/diamond/2_training/forces/dfpt/ph.out
+
+ Step 3: NequIP training...
+ --skip-training: using existing checkpoint: /home/apolyukhin/Development/epc_ml/example/diamond/2_training/forces/outputs/2026-03-03/13-29-04/best.ckpt
+
+ Step 4: Compiling model...
+ Compiled model exists: /home/apolyukhin/Development/epc_ml/example/diamond/2_training/forces/diamond_ase.nequip.pth
+
+ Step 5: Running phonopy + NequIP at Gamma...
+ Running phonopy + NequIP in deeph env ...
+ /home/apolyukhin/Development/epc_ml/example/diamond/2_training/forces/_phonopy_launcher.py:28: DeprecationWarning: PhonopyAtoms.get_chemical_symbols() is deprecated. Use symbols attribute instead.
+   a = Atoms(symbols=sc.get_chemical_symbols(),
+ /home/apolyukhin/Development/epc_ml/example/diamond/2_training/forces/_phonopy_launcher.py:29: DeprecationWarning: PhonopyAtoms.get_positions() is deprecated. Use positions attribute instead.
+   positions=sc.get_positions(),
+ /home/apolyukhin/Development/epc_ml/example/diamond/2_training/forces/_phonopy_launcher.py:30: DeprecationWarning: PhonopyAtoms.get_cell() is deprecated. Use cell attribute instead.
+   cell=sc.get_cell(), pbc=True)
+ Phonopy: 1 displacements (128 atoms each)
+ [1/1] |F|max=0.3194 eV/A
+ Gamma freqs written to /home/apolyukhin/Development/epc_ml/example/diamond/2_training/forces/_phonopy_freqs.json
+
+ Step 6: Comparing phonons...
+ Mode   DFPT (THz)   DFPT (cm-1)    ML (THz)   ML (cm-1)   Err (cm-1)
+ -----------------------------------------------------------------
+    1       6.9242        230.97     -0.0000       -0.00      -230.97
+    2       6.9242        230.97     -0.0000       -0.00      -230.97
+    3       6.9242        230.97     -0.0000       -0.00      -230.97
+    4      40.9717       1366.67     38.8626     1296.32       -70.35
+    5      40.9717       1366.67     38.8626     1296.32       -70.35
+    6      40.9717       1366.67     38.8626     1296.32       -70.35
+
+ Optical: DFPT=23.95 THz, ML=38.86 THz, err=+62.3%
+ Saved: /home/apolyukhin/Development/epc_ml/example/diamond/2_training/forces/phonon_comparison.png
+
+ train_force.py done.
2_training/hamiltonian/infer_sc/dataset/00/overlaps.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69f8fb67509a2908d6fbcdc098aa26a8a4af716b76aebda4ff04b872cba43859
+ size 3986765
2_training/hamiltonian/infer_uc/dataset/00/element.dat ADDED
@@ -0,0 +1,2 @@
+ 6
+ 6
2_training/hamiltonian/infer_uc/dataset/00/graph.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:589308142f311a9e1f063af5f5db20bb434be2fb921ab1a5a1fb8da79b2dadc8
+ size 8317809
2_training/hamiltonian/infer_uc/dataset/00/hamiltonians.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:606d2f8997a1fb3ee3847fcb1a519ee249f8a7e88d323b694daf7858ab43943d
+ size 558800
2_training/hamiltonian/infer_uc/dataset/00/hamiltonians_pred.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf17707c62b11e50879ce078e8582ca30180d512206b2f5fc52deee5f2faa823
+ size 519600
2_training/hamiltonian/infer_uc/dataset/00/info.json ADDED
@@ -0,0 +1 @@
+ {"isspinful": false}
2_training/hamiltonian/infer_uc/dataset/00/lat.dat ADDED
@@ -0,0 +1,3 @@
+ 0.000000000000000000e+00 1.783499998856923785e+00 1.783499998856923785e+00
+ 1.783499998856923785e+00 0.000000000000000000e+00 1.783499998856923785e+00
+ 1.783499998856923785e+00 1.783499998856923785e+00 0.000000000000000000e+00
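These are the primitive FCC lattice vectors a/2 (0,1,1), a/2 (1,0,1), a/2 (1,1,0) with a = 2 x 1.7835 ≈ 3.567 Å, the conventional lattice constant of diamond (assuming lat.dat is in Angstrom; the matrix is symmetric here, so its row/column convention does not matter). A quick consistency check:

    import numpy as np

    lat = np.loadtxt("lat.dat")       # 3x3 lattice matrix from the file above
    a = 2 * lat[0, 1]                 # conventional cubic lattice constant
    vol = abs(np.linalg.det(lat))     # primitive-cell volume, equal to a**3 / 4
    print(a, vol)                     # ~3.567, ~11.35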
2_training/hamiltonian/infer_uc/dataset/00/orbital_types.dat ADDED
@@ -0,0 +1,2 @@
+ 0 0 1 1 2
+ 0 0 1 1 2
2_training/hamiltonian/infer_uc/dataset/00/overlaps.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb1a42ebec020dbdbe6e8b9e8ece9f70d737db9e5e6108003c759c0baad85c3d
+ size 499888
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/config.ini ADDED
@@ -0,0 +1,82 @@
+ [basic]
+ graph_dir = /home/apolyukhin/scripts/ml/diamond-qe/deeph-data/graph
+ save_dir = /home/apolyukhin/scripts/ml/diamond-qe/pristine-uc/reconstruction/aohamiltonian/pred_ham_std
+ raw_dir = /home/apolyukhin/scripts/ml/diamond-qe/deeph-data/preprocess
+ dataset_name = diamond_qe
+ only_get_graph = False
+ interface = h5
+ target = hamiltonian
+ disable_cuda = True
+ device = cpu
+ num_threads = -1
+ save_to_time_folder = False
+ save_csv = True
+ tb_writer = False
+ seed = 42
+ multiprocessing = 0
+ orbital = [{"6 6": [0, 0]}, {"6 6": [0, 1]}, {"6 6": [0, 2]}, {"6 6": [0, 3]}, {"6 6": [0, 4]}, {"6 6": [0, 5]}, {"6 6": [0, 6]}, {"6 6": [0, 7]}, {"6 6": [0, 8]}, {"6 6": [0, 9]}, {"6 6": [0, 10]}, {"6 6": [0, 11]}, {"6 6": [0, 12]}, {"6 6": [1, 0]}, {"6 6": [1, 1]}, {"6 6": [1, 2]}, {"6 6": [1, 3]}, {"6 6": [1, 4]}, {"6 6": [1, 5]}, {"6 6": [1, 6]}, {"6 6": [1, 7]}, {"6 6": [1, 8]}, {"6 6": [1, 9]}, {"6 6": [1, 10]}, {"6 6": [1, 11]}, {"6 6": [1, 12]}, {"6 6": [2, 0]}, {"6 6": [2, 1]}, {"6 6": [2, 2]}, {"6 6": [2, 3]}, {"6 6": [2, 4]}, {"6 6": [2, 5]}, {"6 6": [2, 6]}, {"6 6": [2, 7]}, {"6 6": [2, 8]}, {"6 6": [2, 9]}, {"6 6": [2, 10]}, {"6 6": [2, 11]}, {"6 6": [2, 12]}, {"6 6": [3, 0]}, {"6 6": [3, 1]}, {"6 6": [3, 2]}, {"6 6": [3, 3]}, {"6 6": [3, 4]}, {"6 6": [3, 5]}, {"6 6": [3, 6]}, {"6 6": [3, 7]}, {"6 6": [3, 8]}, {"6 6": [3, 9]}, {"6 6": [3, 10]}, {"6 6": [3, 11]}, {"6 6": [3, 12]}, {"6 6": [4, 0]}, {"6 6": [4, 1]}, {"6 6": [4, 2]}, {"6 6": [4, 3]}, {"6 6": [4, 4]}, {"6 6": [4, 5]}, {"6 6": [4, 6]}, {"6 6": [4, 7]}, {"6 6": [4, 8]}, {"6 6": [4, 9]}, {"6 6": [4, 10]}, {"6 6": [4, 11]}, {"6 6": [4, 12]}, {"6 6": [5, 0]}, {"6 6": [5, 1]}, {"6 6": [5, 2]}, {"6 6": [5, 3]}, {"6 6": [5, 4]}, {"6 6": [5, 5]}, {"6 6": [5, 6]}, {"6 6": [5, 7]}, {"6 6": [5, 8]}, {"6 6": [5, 9]}, {"6 6": [5, 10]}, {"6 6": [5, 11]}, {"6 6": [5, 12]}, {"6 6": [6, 0]}, {"6 6": [6, 1]}, {"6 6": [6, 2]}, {"6 6": [6, 3]}, {"6 6": [6, 4]}, {"6 6": [6, 5]}, {"6 6": [6, 6]}, {"6 6": [6, 7]}, {"6 6": [6, 8]}, {"6 6": [6, 9]}, {"6 6": [6, 10]}, {"6 6": [6, 11]}, {"6 6": [6, 12]}, {"6 6": [7, 0]}, {"6 6": [7, 1]}, {"6 6": [7, 2]}, {"6 6": [7, 3]}, {"6 6": [7, 4]}, {"6 6": [7, 5]}, {"6 6": [7, 6]}, {"6 6": [7, 7]}, {"6 6": [7, 8]}, {"6 6": [7, 9]}, {"6 6": [7, 10]}, {"6 6": [7, 11]}, {"6 6": [7, 12]}, {"6 6": [8, 0]}, {"6 6": [8, 1]}, {"6 6": [8, 2]}, {"6 6": [8, 3]}, {"6 6": [8, 4]}, {"6 6": [8, 5]}, {"6 6": [8, 6]}, {"6 6": [8, 7]}, {"6 6": [8, 8]}, {"6 6": [8, 9]}, {"6 6": [8, 10]}, {"6 6": [8, 11]}, {"6 6": [8, 12]}, {"6 6": [9, 0]}, {"6 6": [9, 1]}, {"6 6": [9, 2]}, {"6 6": [9, 3]}, {"6 6": [9, 4]}, {"6 6": [9, 5]}, {"6 6": [9, 6]}, {"6 6": [9, 7]}, {"6 6": [9, 8]}, {"6 6": [9, 9]}, {"6 6": [9, 10]}, {"6 6": [9, 11]}, {"6 6": [9, 12]}, {"6 6": [10, 0]}, {"6 6": [10, 1]}, {"6 6": [10, 2]}, {"6 6": [10, 3]}, {"6 6": [10, 4]}, {"6 6": [10, 5]}, {"6 6": [10, 6]}, {"6 6": [10, 7]}, {"6 6": [10, 8]}, {"6 6": [10, 9]}, {"6 6": [10, 10]}, {"6 6": [10, 11]}, {"6 6": [10, 12]}, {"6 6": [11, 0]}, {"6 6": [11, 1]}, {"6 6": [11, 2]}, {"6 6": [11, 3]}, {"6 6": [11, 4]}, {"6 6": [11, 5]}, {"6 6": [11, 6]}, {"6 6": [11, 7]}, {"6 6": [11, 8]}, {"6 6": [11, 9]}, {"6 6": [11, 10]}, {"6 6": [11, 11]}, {"6 6": [11, 12]}, {"6 6": [12, 0]}, {"6 6": [12, 1]}, {"6 6": [12, 2]}, {"6 6": [12, 3]}, {"6 6": [12, 4]}, {"6 6": [12, 5]}, {"6 6": [12, 6]}, {"6 6": [12, 7]}, {"6 6": [12, 8]}, {"6 6": [12, 9]}, {"6 6": [12, 10]}, {"6 6": [12, 11]}, {"6 6": [12, 12]}]
+ o_component = H
+ energy_component = summation
+ max_element = -1
+ statistics = False
+ normalizer = False
+ boxcox = False
+
+ [graph]
+ radius = -1.0
+ max_num_nbr = 0
+ create_from_dft = True
+ if_lcmp_graph = True
+ separate_onsite = False
+ new_sp = False
+
+ [train]
+ epochs = 5000
+ pretrained =
+ resume =
+ train_ratio = 0.6
+ val_ratio = 0.2
+ test_ratio = 0.2
+ early_stopping_loss = 0.0
+ early_stopping_loss_epoch = [0.000000, 500]
+ revert_then_decay = True
+ revert_threshold = 30
+ revert_decay_epoch = [800, 2000, 3000, 4000]
+ revert_decay_gamma = [0.4, 0.5, 0.5, 0.4]
+ clip_grad = True
+ clip_grad_value = 4.2
+ switch_sgd = False
+ switch_sgd_lr = 1e-4
+ switch_sgd_epoch = -1
+
+ [hyperparameter]
+ batch_size = 1
+ dtype = float32
+ optimizer = adam
+ learning_rate = 0.001
+ lr_scheduler =
+ lr_milestones = []
+ momentum = 0.9
+ weight_decay = 0
+ criterion = MaskMSELoss
+ retain_edge_fea = True
+ lambda_eij = 0.0
+ lambda_ei = 0.1
+ lambda_etot = 0.0
+
+ [network]
+ atom_fea_len = 64
+ edge_fea_len = 128
+ gauss_stop = 6.0
+ num_l = 4
+ aggr = add
+ distance_expansion = GaussianBasis
+ if_exp = True
+ if_multiplelinear = False
+ if_edge_update = True
+ if_lcmp = True
+ normalization = LayerNorm
+ atom_update_net = PAINN
+ trainable_gaussians = False
+ type_affine = False
+
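The `orbital` option enumerates every Hamiltonian matrix-element block for the carbon-carbon ("6 6") atom pair. Each carbon carries 13 atomic orbitals (orbital_types.dat above lists shells with l = 0, 0, 1, 1, 2, i.e. 1 + 1 + 3 + 3 + 5 = 13), so the list holds all 13 x 13 = 169 orbital-index pairs. A sketch (the helper is illustrative, not part of DeepH) that regenerates it:

    import json

    orbital_types = [0, 0, 1, 1, 2]                # one l per shell, from orbital_types.dat
    n_orb = sum(2 * l + 1 for l in orbital_types)  # 13 orbitals per carbon atom

    orbital = [{"6 6": [i, j]} for i in range(n_orb) for j in range(n_orb)]
    print(len(orbital))             # 169, as in the config above
    print(json.dumps(orbital[:2]))  # [{"6 6": [0, 0]}, {"6 6": [0, 1]}]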
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/result.txt ADDED
@@ -0,0 +1,86 @@
+ ====== CONFIG ======
+ [basic]
+ graph_dir=/home/apolyukhin/scripts/ml/diamond-qe/deeph-data/graph
+ save_dir=/home/apolyukhin/scripts/ml/diamond-qe/pristine-uc/reconstruction/aohamiltonian/pred_ham_std
+ raw_dir=/home/apolyukhin/scripts/ml/diamond-qe/deeph-data/preprocess
+ dataset_name=diamond_qe
+ only_get_graph=False
+ interface=h5
+ target=hamiltonian
+ disable_cuda=True
+ device=cpu
+ num_threads=-1
+ save_to_time_folder=False
+ save_csv=True
+ tb_writer=False
+ seed=42
+ multiprocessing=0
+ orbital=[{"6 6": [0, 0]}, {"6 6": [0, 1]}, {"6 6": [0, 2]}, {"6 6": [0, 3]}, {"6 6": [0, 4]}, {"6 6": [0, 5]}, {"6 6": [0, 6]}, {"6 6": [0, 7]}, {"6 6": [0, 8]}, {"6 6": [0, 9]}, {"6 6": [0, 10]}, {"6 6": [0, 11]}, {"6 6": [0, 12]}, {"6 6": [1, 0]}, {"6 6": [1, 1]}, {"6 6": [1, 2]}, {"6 6": [1, 3]}, {"6 6": [1, 4]}, {"6 6": [1, 5]}, {"6 6": [1, 6]}, {"6 6": [1, 7]}, {"6 6": [1, 8]}, {"6 6": [1, 9]}, {"6 6": [1, 10]}, {"6 6": [1, 11]}, {"6 6": [1, 12]}, {"6 6": [2, 0]}, {"6 6": [2, 1]}, {"6 6": [2, 2]}, {"6 6": [2, 3]}, {"6 6": [2, 4]}, {"6 6": [2, 5]}, {"6 6": [2, 6]}, {"6 6": [2, 7]}, {"6 6": [2, 8]}, {"6 6": [2, 9]}, {"6 6": [2, 10]}, {"6 6": [2, 11]}, {"6 6": [2, 12]}, {"6 6": [3, 0]}, {"6 6": [3, 1]}, {"6 6": [3, 2]}, {"6 6": [3, 3]}, {"6 6": [3, 4]}, {"6 6": [3, 5]}, {"6 6": [3, 6]}, {"6 6": [3, 7]}, {"6 6": [3, 8]}, {"6 6": [3, 9]}, {"6 6": [3, 10]}, {"6 6": [3, 11]}, {"6 6": [3, 12]}, {"6 6": [4, 0]}, {"6 6": [4, 1]}, {"6 6": [4, 2]}, {"6 6": [4, 3]}, {"6 6": [4, 4]}, {"6 6": [4, 5]}, {"6 6": [4, 6]}, {"6 6": [4, 7]}, {"6 6": [4, 8]}, {"6 6": [4, 9]}, {"6 6": [4, 10]}, {"6 6": [4, 11]}, {"6 6": [4, 12]}, {"6 6": [5, 0]}, {"6 6": [5, 1]}, {"6 6": [5, 2]}, {"6 6": [5, 3]}, {"6 6": [5, 4]}, {"6 6": [5, 5]}, {"6 6": [5, 6]}, {"6 6": [5, 7]}, {"6 6": [5, 8]}, {"6 6": [5, 9]}, {"6 6": [5, 10]}, {"6 6": [5, 11]}, {"6 6": [5, 12]}, {"6 6": [6, 0]}, {"6 6": [6, 1]}, {"6 6": [6, 2]}, {"6 6": [6, 3]}, {"6 6": [6, 4]}, {"6 6": [6, 5]}, {"6 6": [6, 6]}, {"6 6": [6, 7]}, {"6 6": [6, 8]}, {"6 6": [6, 9]}, {"6 6": [6, 10]}, {"6 6": [6, 11]}, {"6 6": [6, 12]}, {"6 6": [7, 0]}, {"6 6": [7, 1]}, {"6 6": [7, 2]}, {"6 6": [7, 3]}, {"6 6": [7, 4]}, {"6 6": [7, 5]}, {"6 6": [7, 6]}, {"6 6": [7, 7]}, {"6 6": [7, 8]}, {"6 6": [7, 9]}, {"6 6": [7, 10]}, {"6 6": [7, 11]}, {"6 6": [7, 12]}, {"6 6": [8, 0]}, {"6 6": [8, 1]}, {"6 6": [8, 2]}, {"6 6": [8, 3]}, {"6 6": [8, 4]}, {"6 6": [8, 5]}, {"6 6": [8, 6]}, {"6 6": [8, 7]}, {"6 6": [8, 8]}, {"6 6": [8, 9]}, {"6 6": [8, 10]}, {"6 6": [8, 11]}, {"6 6": [8, 12]}, {"6 6": [9, 0]}, {"6 6": [9, 1]}, {"6 6": [9, 2]}, {"6 6": [9, 3]}, {"6 6": [9, 4]}, {"6 6": [9, 5]}, {"6 6": [9, 6]}, {"6 6": [9, 7]}, {"6 6": [9, 8]}, {"6 6": [9, 9]}, {"6 6": [9, 10]}, {"6 6": [9, 11]}, {"6 6": [9, 12]}, {"6 6": [10, 0]}, {"6 6": [10, 1]}, {"6 6": [10, 2]}, {"6 6": [10, 3]}, {"6 6": [10, 4]}, {"6 6": [10, 5]}, {"6 6": [10, 6]}, {"6 6": [10, 7]}, {"6 6": [10, 8]}, {"6 6": [10, 9]}, {"6 6": [10, 10]}, {"6 6": [10, 11]}, {"6 6": [10, 12]}, {"6 6": [11, 0]}, {"6 6": [11, 1]}, {"6 6": [11, 2]}, {"6 6": [11, 3]}, {"6 6": [11, 4]}, {"6 6": [11, 5]}, {"6 6": [11, 6]}, {"6 6": [11, 7]}, {"6 6": [11, 8]}, {"6 6": [11, 9]}, {"6 6": [11, 10]}, {"6 6": [11, 11]}, {"6 6": [11, 12]}, {"6 6": [12, 0]}, {"6 6": [12, 1]}, {"6 6": [12, 2]}, {"6 6": [12, 3]}, {"6 6": [12, 4]}, {"6 6": [12, 5]}, {"6 6": [12, 6]}, {"6 6": [12, 7]}, {"6 6": [12, 8]}, {"6 6": [12, 9]}, {"6 6": [12, 10]}, {"6 6": [12, 11]}, {"6 6": [12, 12]}]
+ o_component=H
+ energy_component=summation
+ max_element=-1
+ statistics=False
+ normalizer=False
+ boxcox=False
+
+ [graph]
+ radius=-1.0
+ max_num_nbr=0
+ create_from_dft=True
+ if_lcmp_graph=True
+ separate_onsite=False
+ new_sp=False
+
+ [train]
+ epochs=5000
+ pretrained=
+ resume=
+ train_ratio=0.6
+ val_ratio=0.2
+ test_ratio=0.2
+ early_stopping_loss=0.0
+ early_stopping_loss_epoch=[0.000000, 500]
+ revert_then_decay=True
+ revert_threshold=30
+ revert_decay_epoch=[800, 2000, 3000, 4000]
+ revert_decay_gamma=[0.4, 0.5, 0.5, 0.4]
+ clip_grad=True
+ clip_grad_value=4.2
+ switch_sgd=False
+ switch_sgd_lr=1e-4
+ switch_sgd_epoch=-1
+
+ [hyperparameter]
+ batch_size=1
+ dtype=float32
+ optimizer=adam
+ learning_rate=0.001
+ lr_scheduler=
+ lr_milestones=[]
+ momentum=0.9
+ weight_decay=0
+ criterion=MaskMSELoss
+ retain_edge_fea=True
+ lambda_eij=0.0
+ lambda_ei=0.1
+ lambda_etot=0.0
+
+ [network]
+ atom_fea_len=64
+ edge_fea_len=128
+ gauss_stop=6.0
+ num_l=4
+ aggr=add
+ distance_expansion=GaussianBasis
+ if_exp=True
+ if_multiplelinear=False
+ if_edge_update=True
+ if_lcmp=True
+ normalization=LayerNorm
+ atom_update_net=PAINN
+ trainable_gaussians=False
+ type_affine=False
+
+ => load best checkpoint (epoch 3217)
+ => Atomic types: [6], spinful: False, the number of atomic types: 1.
+ Save processed graph to /home/apolyukhin/scripts/ml/diamond-qe/pristine-uc/reconstruction/aohamiltonian/graph.pkl, cost 0.1220557689666748 seconds
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__init__.py ADDED
@@ -0,0 +1,10 @@
+ from .data import HData
+ from .model import HGNN, ExpBernsteinBasis
+ from .utils import print_args, Logger, MaskMSELoss, MaskMAELoss, write_ham_npz, write_ham, write_ham_h5, get_config, \
+     get_inference_config, get_preprocess_config
+ from .graph import Collater, collate_fn, get_graph, load_orbital_types
+ from .kernel import DeepHKernel
+ from .preprocess import get_rc, OijLoad, GetEEiEij, abacus_parse, siesta_parse
+ from .rotate import get_rh, rotate_back, Rotate, dtype_dict
+
+ __version__ = "0.2.2"
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (925 Bytes).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/data.cpython-312.pyc ADDED
Binary file (12.2 kB).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/graph.cpython-312.pyc ADDED
Binary file (71.1 kB).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/kernel.cpython-312.pyc ADDED
Binary file (61.3 kB).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/model.cpython-312.pyc ADDED
Binary file (38.4 kB).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/rotate.cpython-312.pyc ADDED
Binary file (18.7 kB).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/__pycache__/utils.cpython-312.pyc ADDED
Binary file (13.3 kB).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/data.py ADDED
@@ -0,0 +1,217 @@
+ import warnings
+ import os
+ import time
+ import tqdm
+
+ from pymatgen.core.structure import Structure
+ import numpy as np
+ import torch
+ from torch_geometric.data import InMemoryDataset
+ from pathos.multiprocessing import ProcessingPool as Pool
+
+ from .graph import get_graph
+
+
+ class HData(InMemoryDataset):
+     def __init__(self, raw_data_dir: str, graph_dir: str, interface: str, target: str,
+                  dataset_name: str, multiprocessing: int, radius, max_num_nbr,
+                  num_l, max_element, create_from_DFT, if_lcmp_graph, separate_onsite, new_sp,
+                  default_dtype_torch, nums: int = None, transform=None, pre_transform=None, pre_filter=None):
+         """
+         when interface == 'h5',
+         raw_data_dir
+         ├── 00
+         │   ├── rh.h5 / rdm.h5
+         │   ├── rc.h5
+         │   ├── element.dat
+         │   ├── orbital_types.dat
+         │   ├── site_positions.dat
+         │   ├── lat.dat
+         │   └── info.json
+         ├── 01
+         │   ├── rh.h5 / rdm.h5
+         │   ├── rc.h5
+         │   ├── element.dat
+         │   ├── orbital_types.dat
+         │   ├── site_positions.dat
+         │   ├── lat.dat
+         │   └── info.json
+         ├── 02
+         │   ├── rh.h5 / rdm.h5
+         │   ├── rc.h5
+         │   ├── element.dat
+         │   ├── orbital_types.dat
+         │   ├── site_positions.dat
+         │   ├── lat.dat
+         │   └── info.json
+         ├── ...
+         """
+         self.raw_data_dir = raw_data_dir
+         assert dataset_name.find('-') == -1, '"-" can not be included in the dataset name'
+         if create_from_DFT:
+             way_create_graph = 'FromDFT'
+         else:
+             way_create_graph = f'{radius}r{max_num_nbr}mn'
+         if if_lcmp_graph:
+             lcmp_str = f'{num_l}l'
+         else:
+             lcmp_str = 'WithoutLCMP'
+         if separate_onsite is True:
+             onsite_str = '-SeparateOnsite'
+         else:
+             onsite_str = ''
+         if new_sp:
+             new_sp_str = '-NewSP'
+         else:
+             new_sp_str = ''
+         if target == 'hamiltonian':
+             title = 'HGraph'
+         else:
+             raise ValueError('Unknown prediction target: {}'.format(target))
+         graph_file_name = f'{title}-{interface}-{dataset_name}-{lcmp_str}-{way_create_graph}{onsite_str}{new_sp_str}.pkl'
+         self.data_file = os.path.join(graph_dir, graph_file_name)
+         os.makedirs(graph_dir, exist_ok=True)
+         self.data, self.slices = None, None
+         self.interface = interface
+         self.target = target
+         self.dataset_name = dataset_name
+         self.multiprocessing = multiprocessing
+         self.radius = radius
+         self.max_num_nbr = max_num_nbr
+         self.num_l = num_l
+         self.create_from_DFT = create_from_DFT
+         self.if_lcmp_graph = if_lcmp_graph
+         self.separate_onsite = separate_onsite
+         self.new_sp = new_sp
+         self.default_dtype_torch = default_dtype_torch
+
+         self.nums = nums
+         self.transform = transform
+         self.pre_transform = pre_transform
+         self.pre_filter = pre_filter
+         self.__indices__ = None
+         self.__data_list__ = None
+         self._indices = None
+         self._data_list = None
+
+         print(f'Graph data file: {graph_file_name}')
+         if os.path.exists(self.data_file):
+             print('Use existing graph data file')
+         else:
+             print('Process new data file......')
+             self.process()
+         begin = time.time()
+         try:
+             loaded_data = torch.load(self.data_file)
+         except AttributeError:
+             raise RuntimeError('Error in loading graph data file, try to delete it and generate the graph file with the current version of PyG')
+         if len(loaded_data) == 2:
+             warnings.warn('You are using the graph data file with an old version')
+             self.data, self.slices = loaded_data
+             self.info = {
+                 "spinful": False,
+                 "index_to_Z": torch.arange(max_element + 1),
+                 "Z_to_index": torch.arange(max_element + 1),
+             }
+         elif len(loaded_data) == 3:
+             self.data, self.slices, tmp = loaded_data
+             if isinstance(tmp, dict):
+                 self.info = tmp
+                 print(f"Atomic types: {self.info['index_to_Z'].tolist()}")
+             else:
+                 warnings.warn('You are using an old version of the graph data file')
+                 self.info = {
+                     "spinful": tmp,
+                     "index_to_Z": torch.arange(max_element + 1),
+                     "Z_to_index": torch.arange(max_element + 1),
+                 }
+         print(f'Finish loading the processed {len(self)} structures (spinful: {self.info["spinful"]}, '
+               f'the number of atomic types: {len(self.info["index_to_Z"])}), cost {time.time() - begin:.0f} seconds')
+
+     def process_worker(self, folder, **kwargs):
+         stru_id = os.path.split(folder)[-1]
+
+         structure = Structure(np.loadtxt(os.path.join(folder, 'lat.dat')).T,
+                               np.loadtxt(os.path.join(folder, 'element.dat')),
+                               np.loadtxt(os.path.join(folder, 'site_positions.dat')).T,
+                               coords_are_cartesian=True,
+                               to_unit_cell=False)
+
+         cart_coords = torch.tensor(structure.cart_coords, dtype=self.default_dtype_torch)
+         frac_coords = torch.tensor(structure.frac_coords, dtype=self.default_dtype_torch)
+         numbers = torch.tensor(structure.atomic_numbers)
+         structure.lattice.matrix.setflags(write=True)
+         lattice = torch.tensor(structure.lattice.matrix, dtype=self.default_dtype_torch)
+         if self.target == 'E_ij':
+             huge_structure = True
+         else:
+             huge_structure = False
+         return get_graph(cart_coords, frac_coords, numbers, stru_id, r=self.radius, max_num_nbr=self.max_num_nbr,
+                          numerical_tol=1e-8, lattice=lattice, default_dtype_torch=self.default_dtype_torch,
+                          tb_folder=folder, interface=self.interface, num_l=self.num_l,
+                          create_from_DFT=self.create_from_DFT, if_lcmp_graph=self.if_lcmp_graph,
+                          separate_onsite=self.separate_onsite,
+                          target=self.target, huge_structure=huge_structure, if_new_sp=self.new_sp, **kwargs)
+
+     def process(self):
+         begin = time.time()
+         folder_list = []
+         for root, dirs, files in os.walk(self.raw_data_dir):
+             if (self.interface == 'h5' and 'rc.h5' in files) or (
+                     self.interface == 'npz' and 'rc.npz' in files):
+                 folder_list.append(root)
+         folder_list = sorted(folder_list)
+         folder_list = folder_list[: self.nums]
+         if self.dataset_name == 'graphene_450':
+             folder_list = folder_list[500:5000:10]
+         if self.dataset_name == 'graphene_1500':
+             folder_list = folder_list[500:5000:3]
+         if self.dataset_name == 'bp_bilayer':
+             folder_list = folder_list[:600]
+         assert len(folder_list) != 0, "Can not find any structure"
+         print('Found %d structures, have cost %d seconds' % (len(folder_list), time.time() - begin))
+
+         if self.multiprocessing == 0:
+             print(f'Use multiprocessing (nodes = num_processors x num_threads = 1 x {torch.get_num_threads()})')
+             data_list = [self.process_worker(folder) for folder in tqdm.tqdm(folder_list)]
+         else:
+             pool_dict = {} if self.multiprocessing < 0 else {'nodes': self.multiprocessing}
+             # BS (2023.06.06):
+             # The keyword "num_threads" in kernel.py can be used to set the torch threads.
+             # The multiprocessing in "process_worker" conflicts with the num_threads used by torch.
+             # To avoid this conflict, the number of torch threads is limited to one
+             # and restored once process_worker finishes.
+             torch_num_threads = torch.get_num_threads()
+             torch.set_num_threads(1)
+
+             with Pool(**pool_dict) as pool:
+                 nodes = pool.nodes
+                 print(f'Use multiprocessing (nodes = num_processors x num_threads = {nodes} x {torch.get_num_threads()})')
+                 data_list = list(tqdm.tqdm(pool.imap(self.process_worker, folder_list), total=len(folder_list)))
+             torch.set_num_threads(torch_num_threads)
+         print('Finish processing %d structures, have cost %d seconds' % (len(data_list), time.time() - begin))
+
+         if self.pre_filter is not None:
+             data_list = [d for d in data_list if self.pre_filter(d)]
+         if self.pre_transform is not None:
+             data_list = [self.pre_transform(d) for d in data_list]
+
+         index_to_Z, Z_to_index = self.element_statistics(data_list)
+         spinful = data_list[0].spinful
+         for d in data_list:
+             assert spinful == d.spinful
+
+         data, slices = self.collate(data_list)
+         torch.save((data, slices, dict(spinful=spinful, index_to_Z=index_to_Z, Z_to_index=Z_to_index)), self.data_file)
+         print('Finish saving %d structures to %s, have cost %d seconds' % (
+             len(data_list), self.data_file, time.time() - begin))
+
+     def element_statistics(self, data_list):
+         index_to_Z, inverse_indices = torch.unique(data_list[0].x, sorted=True, return_inverse=True)
+         Z_to_index = torch.full((100,), -1, dtype=torch.int64)
+         Z_to_index[index_to_Z] = torch.arange(len(index_to_Z))
+
+         for data in data_list:
+             data.x = Z_to_index[data.x]
+
+         return index_to_Z, Z_to_index
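For this diamond dataset (element.dat holds two carbons), `element_statistics` maps atomic number Z = 6 to type index 0, which is exactly what result.txt above reports ("Atomic types: [6] ... the number of atomic types: 1"). A standalone illustration of that mapping:

    import torch

    x = torch.tensor([6, 6])                     # atomic numbers, as in element.dat
    index_to_Z, _ = torch.unique(x, sorted=True, return_inverse=True)
    Z_to_index = torch.full((100,), -1, dtype=torch.int64)
    Z_to_index[index_to_Z] = torch.arange(len(index_to_Z))
    print(index_to_Z.tolist())    # [6]
    print(Z_to_index[6].item())   # 0: carbon becomes atom type 0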
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/default.ini ADDED
@@ -0,0 +1,88 @@
+ [basic]
+ graph_dir = /your/own/path
+ save_dir = /your/own/path
+ raw_dir = /your/own/path
+ dataset_name = your_own_name
+ only_get_graph = False
+ ;choices = ['h5', 'npz']
+ interface = h5
+ target = hamiltonian
+ disable_cuda = False
+ device = cuda:0
+ ;-1 for cpu_count(logical=False) // torch.cuda.device_count()
+ num_threads = -1
+ save_to_time_folder = True
+ save_csv = False
+ tb_writer = True
+ seed = 42
+ multiprocessing = 0
+ orbital = [{"6 6": [0, 0]}, {"6 6": [0, 1]}, {"6 6": [0, 2]}, {"6 6": [0, 3]}, {"6 6": [0, 4]}, {"6 6": [0, 5]}, {"6 6": [0, 6]}, {"6 6": [0, 7]}, {"6 6": [0, 8]}, {"6 6": [0, 9]}, {"6 6": [0, 10]}, {"6 6": [0, 11]}, {"6 6": [0, 12]}, {"6 6": [1, 0]}, {"6 6": [1, 1]}, {"6 6": [1, 2]}, {"6 6": [1, 3]}, {"6 6": [1, 4]}, {"6 6": [1, 5]}, {"6 6": [1, 6]}, {"6 6": [1, 7]}, {"6 6": [1, 8]}, {"6 6": [1, 9]}, {"6 6": [1, 10]}, {"6 6": [1, 11]}, {"6 6": [1, 12]}, {"6 6": [2, 0]}, {"6 6": [2, 1]}, {"6 6": [2, 2]}, {"6 6": [2, 3]}, {"6 6": [2, 4]}, {"6 6": [2, 5]}, {"6 6": [2, 6]}, {"6 6": [2, 7]}, {"6 6": [2, 8]}, {"6 6": [2, 9]}, {"6 6": [2, 10]}, {"6 6": [2, 11]}, {"6 6": [2, 12]}, {"6 6": [3, 0]}, {"6 6": [3, 1]}, {"6 6": [3, 2]}, {"6 6": [3, 3]}, {"6 6": [3, 4]}, {"6 6": [3, 5]}, {"6 6": [3, 6]}, {"6 6": [3, 7]}, {"6 6": [3, 8]}, {"6 6": [3, 9]}, {"6 6": [3, 10]}, {"6 6": [3, 11]}, {"6 6": [3, 12]}, {"6 6": [4, 0]}, {"6 6": [4, 1]}, {"6 6": [4, 2]}, {"6 6": [4, 3]}, {"6 6": [4, 4]}, {"6 6": [4, 5]}, {"6 6": [4, 6]}, {"6 6": [4, 7]}, {"6 6": [4, 8]}, {"6 6": [4, 9]}, {"6 6": [4, 10]}, {"6 6": [4, 11]}, {"6 6": [4, 12]}, {"6 6": [5, 0]}, {"6 6": [5, 1]}, {"6 6": [5, 2]}, {"6 6": [5, 3]}, {"6 6": [5, 4]}, {"6 6": [5, 5]}, {"6 6": [5, 6]}, {"6 6": [5, 7]}, {"6 6": [5, 8]}, {"6 6": [5, 9]}, {"6 6": [5, 10]}, {"6 6": [5, 11]}, {"6 6": [5, 12]}, {"6 6": [6, 0]}, {"6 6": [6, 1]}, {"6 6": [6, 2]}, {"6 6": [6, 3]}, {"6 6": [6, 4]}, {"6 6": [6, 5]}, {"6 6": [6, 6]}, {"6 6": [6, 7]}, {"6 6": [6, 8]}, {"6 6": [6, 9]}, {"6 6": [6, 10]}, {"6 6": [6, 11]}, {"6 6": [6, 12]}, {"6 6": [7, 0]}, {"6 6": [7, 1]}, {"6 6": [7, 2]}, {"6 6": [7, 3]}, {"6 6": [7, 4]}, {"6 6": [7, 5]}, {"6 6": [7, 6]}, {"6 6": [7, 7]}, {"6 6": [7, 8]}, {"6 6": [7, 9]}, {"6 6": [7, 10]}, {"6 6": [7, 11]}, {"6 6": [7, 12]}, {"6 6": [8, 0]}, {"6 6": [8, 1]}, {"6 6": [8, 2]}, {"6 6": [8, 3]}, {"6 6": [8, 4]}, {"6 6": [8, 5]}, {"6 6": [8, 6]}, {"6 6": [8, 7]}, {"6 6": [8, 8]}, {"6 6": [8, 9]}, {"6 6": [8, 10]}, {"6 6": [8, 11]}, {"6 6": [8, 12]}, {"6 6": [9, 0]}, {"6 6": [9, 1]}, {"6 6": [9, 2]}, {"6 6": [9, 3]}, {"6 6": [9, 4]}, {"6 6": [9, 5]}, {"6 6": [9, 6]}, {"6 6": [9, 7]}, {"6 6": [9, 8]}, {"6 6": [9, 9]}, {"6 6": [9, 10]}, {"6 6": [9, 11]}, {"6 6": [9, 12]}, {"6 6": [10, 0]}, {"6 6": [10, 1]}, {"6 6": [10, 2]}, {"6 6": [10, 3]}, {"6 6": [10, 4]}, {"6 6": [10, 5]}, {"6 6": [10, 6]}, {"6 6": [10, 7]}, {"6 6": [10, 8]}, {"6 6": [10, 9]}, {"6 6": [10, 10]}, {"6 6": [10, 11]}, {"6 6": [10, 12]}, {"6 6": [11, 0]}, {"6 6": [11, 1]}, {"6 6": [11, 2]}, {"6 6": [11, 3]}, {"6 6": [11, 4]}, {"6 6": [11, 5]}, {"6 6": [11, 6]}, {"6 6": [11, 7]}, {"6 6": [11, 8]}, {"6 6": [11, 9]}, {"6 6": [11, 10]}, {"6 6": [11, 11]}, {"6 6": [11, 12]}, {"6 6": [12, 0]}, {"6 6": [12, 1]}, {"6 6": [12, 2]}, {"6 6": [12, 3]}, {"6 6": [12, 4]}, {"6 6": [12, 5]}, {"6 6": [12, 6]}, {"6 6": [12, 7]}, {"6 6": [12, 8]}, {"6 6": [12, 9]}, {"6 6": [12, 10]}, {"6 6": [12, 11]}, {"6 6": [12, 12]}]
+ O_component = H
+ energy_component = summation
+ max_element = -1
+ statistics = False
+ normalizer = False
+ boxcox = False
+
+ [graph]
+ radius = -1.0
+ max_num_nbr = 0
+ create_from_DFT = True
+ if_lcmp_graph = True
+ separate_onsite = False
+ new_sp = False
+
+ [train]
+ epochs = 4000
+ pretrained =
+ resume =
+ train_ratio = 0.6
+ val_ratio = 0.2
+ test_ratio = 0.2
+ early_stopping_loss = 0.0
+ early_stopping_loss_epoch = [0.000000, 500]
+ revert_then_decay = True
+ revert_threshold = 30
+ revert_decay_epoch = [500, 2000, 3000]
+ revert_decay_gamma = [0.4, 0.5, 0.5]
+ clip_grad = True
+ clip_grad_value = 4.2
+ switch_sgd = False
+ switch_sgd_lr = 1e-4
+ switch_sgd_epoch = -1
+
+ [hyperparameter]
+ batch_size = 3
+ dtype = float32
+ ;choices = ['sgd', 'sgdm', 'adam', 'lbfgs']
+ optimizer = adam
+ ;initial learning rate
+ learning_rate = 0.001
+ ;choices = ['', 'MultiStepLR', 'ReduceLROnPlateau', 'CyclicLR']
+ lr_scheduler =
+ lr_milestones = []
+ momentum = 0.9
+ weight_decay = 0
+ criterion = MaskMSELoss
+ retain_edge_fea = True
+ lambda_Eij = 0.0
+ lambda_Ei = 0.1
+ lambda_Etot = 0.0
+
+ [network]
+ atom_fea_len = 64
+ edge_fea_len = 128
+ gauss_stop = 6
+ ;The number of angular quantum numbers that spherical harmonic functions have
+ num_l = 5
+ aggr = add
+ distance_expansion = GaussianBasis
+ if_exp = True
+ if_MultipleLinear = False
+ if_edge_update = True
+ if_lcmp = True
+ normalization = LayerNorm
+ ;choices = ['CGConv', 'GAT', 'PAINN']
+ atom_update_net = CGConv
+ trainable_gaussians = False
+ type_affine = False
@@ -0,0 +1 @@
 
 
1
+ from .rmnet import RBF, cosine_cutoff, ShiftedSoftplus, _eps
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_HermNet/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (263 Bytes).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_HermNet/__pycache__/rmnet.cpython-312.pyc ADDED
Binary file (4.67 kB).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_HermNet/license.txt ADDED
@@ -0,0 +1 @@
+ The code in this folder was obtained from "https://github.com/sakuraiiiii/HermNet"
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_HermNet/rmnet.py ADDED
@@ -0,0 +1,105 @@
+ import math
+
+ import torch
+ from torch import nn, Tensor
+ import numpy as np
+
+
+ _eps = 1e-3
+
+ r"""Trick: the parameter `_eps` is introduced to avoid NaN.
+ In HVNet and HTNet, a subgraph will be extracted to calculate angles,
+ and with all the nodes still included in the subgraph,
+ each hidden state in such a subgraph will contain a 0 value.
+ In `painn`, a calculation involving $r / \parallel r \parallel$ is performed.
+ If one just replaces $r / \parallel r \parallel$ with $r / (\parallel r \parallel + \_eps)$,
+ NaN will still occur during training.
+ Consider the following example,
+ $$
+ \left(\frac{x}{r + \_eps}\right)^\prime = \frac{r + \_eps - \frac{x^2}{r}}{(r + \_eps)^2},
+ $$
+ where $r = \sqrt{x^2 + y^2 + z^2}$: the derivative still contains $1/r$, so NaN occurs at $r = 0$.
+ Thus the solution is to change the norm $r$ to $r^\prime = \sqrt{x^2 + y^2 + z^2 + \_eps}$.
+ Since $r$ is rotationally invariant, $r^2$ is rotationally invariant,
+ and obviously $\sqrt{r^2 + \_eps}$ is rotationally invariant as well.
+ """
+
+
+ class RBF(nn.Module):
+     r"""Radial basis function.
+     A modified version of the feature engineering in `DimeNet`,
+     which is used in `PAINN`.
+
+     Parameters
+     ----------
+     rc : float
+         Cutoff radius
+     l : int
+         Parameter in feature engineering in DimeNet
+     """
+     def __init__(self, rc: float, l: int):
+         super(RBF, self).__init__()
+         self.rc = rc
+         self.l = l
+
+     def forward(self, x: Tensor):
+         ls = torch.arange(1, self.l + 1).float().to(x.device)
+         norm = torch.sqrt((x ** 2).sum(dim=-1) + _eps).unsqueeze(-1)
+         return torch.sin(math.pi / self.rc * norm @ ls.unsqueeze(0)) / norm
+
+
+ class cosine_cutoff(nn.Module):
+     r"""Cutoff function from https://aip.scitation.org/doi/pdf/10.1063/1.3553717.
+
+     Parameters
+     ----------
+     rc : float
+         Cutoff radius
+     """
+     def __init__(self, rc: float):
+         super(cosine_cutoff, self).__init__()
+         self.rc = rc
+
+     def forward(self, x: Tensor):
+         norm = torch.norm(x, dim=-1, keepdim=True) + _eps
+         return 0.5 * (torch.cos(math.pi * norm / self.rc) + 1)
+
+
+ class ShiftedSoftplus(nn.Module):
+     r"""
+
+     Description
+     -----------
+     Applies the element-wise function:
+
+     .. math::
+         \text{SSP}(x) = \frac{1}{\beta} \log(1 + \exp(\beta x)) - \log(\text{shift})
+
+     Attributes
+     ----------
+     beta : int
+         :math:`\beta` value for the mathematical formulation. Default to 1.
+     shift : int
+         :math:`\text{shift}` value for the mathematical formulation. Default to 2.
+     """
+     def __init__(self, beta=1, shift=2, threshold=20):
+         super(ShiftedSoftplus, self).__init__()
+
+         self.shift = shift
+         self.softplus = nn.Softplus(beta=beta, threshold=threshold)
+
+     def forward(self, inputs):
+         """
+
+         Description
+         -----------
+         Applies the activation function.
+
+         Parameters
+         ----------
+         inputs : float32 tensor of shape (N, *)
+             * denotes any number of additional dimensions.
+
+         Returns
+         -------
+         float32 tensor of shape (N, *)
+             Result of applying the activation function to the input.
+         """
+         return self.softplus(inputs) - np.log(float(self.shift))
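A quick shape check for the two modules above (a sketch; values are illustrative): N displacement vectors of shape (N, 3) give (N, l) radial features and an (N, 1) cutoff weight.

    import torch

    rbf = RBF(rc=6.0, l=4)
    cutoff = cosine_cutoff(rc=6.0)

    r = torch.randn(10, 3)       # 10 interatomic displacement vectors
    feats = rbf(r)               # (10, 4): sin(l * pi * |r| / rc) / |r| for l = 1..4
    w = cutoff(r)                # (10, 1): decays smoothly to 0 at |r| = rc
    print(feats.shape, w.shape)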
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .graph_norm import GraphNorm
+ from .diff_group_norm import DiffGroupNorm
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (263 Bytes).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__pycache__/diff_group_norm.cpython-312.pyc ADDED
Binary file (6.43 kB).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/__pycache__/graph_norm.cpython-312.pyc ADDED
Binary file (3.76 kB).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/diff_group_norm.py ADDED
@@ -0,0 +1,109 @@
+ import torch
+ from torch import Tensor
+ from torch.nn import Linear, BatchNorm1d
+
+
+ class DiffGroupNorm(torch.nn.Module):
+     r"""The differentiable group normalization layer from the `"Towards Deeper
+     Graph Neural Networks with Differentiable Group Normalization"
+     <https://arxiv.org/abs/2006.06972>`_ paper, which normalizes node features
+     group-wise via a learnable soft cluster assignment
+
+     .. math::
+
+         \mathbf{S} = \text{softmax} (\mathbf{X} \mathbf{W})
+
+     where :math:`\mathbf{W} \in \mathbb{R}^{F \times G}` denotes a trainable
+     weight matrix mapping each node into one of :math:`G` clusters.
+     Normalization is then performed group-wise via:
+
+     .. math::
+
+         \mathbf{X}^{\prime} = \mathbf{X} + \lambda \sum_{i = 1}^G
+         \text{BatchNorm}(\mathbf{S}[:, i] \odot \mathbf{X})
+
+     Args:
+         in_channels (int): Size of each input sample :math:`F`.
+         groups (int): The number of groups :math:`G`.
+         lamda (float, optional): The balancing factor :math:`\lambda` between
+             input embeddings and normalized embeddings. (default: :obj:`0.01`)
+         eps (float, optional): A value added to the denominator for numerical
+             stability. (default: :obj:`1e-5`)
+         momentum (float, optional): The value used for the running mean and
+             running variance computation. (default: :obj:`0.1`)
+         affine (bool, optional): If set to :obj:`True`, this module has
+             learnable affine parameters :math:`\gamma` and :math:`\beta`.
+             (default: :obj:`True`)
+         track_running_stats (bool, optional): If set to :obj:`True`, this
+             module tracks the running mean and variance, and when set to
+             :obj:`False`, this module does not track such statistics and always
+             uses batch statistics in both training and eval modes.
+             (default: :obj:`True`)
+     """
+     def __init__(self, in_channels, groups, lamda=0.01, eps=1e-5, momentum=0.1,
+                  affine=True, track_running_stats=True):
+         super(DiffGroupNorm, self).__init__()
+
+         self.in_channels = in_channels
+         self.groups = groups
+         self.lamda = lamda
+
+         self.lin = Linear(in_channels, groups, bias=False)
+         self.norm = BatchNorm1d(groups * in_channels, eps, momentum, affine,
+                                 track_running_stats)
+
+         self.reset_parameters()
+
+     def reset_parameters(self):
+         self.lin.reset_parameters()
+         self.norm.reset_parameters()
+
+     def forward(self, x: Tensor) -> Tensor:
+         """"""
+         F, G = self.in_channels, self.groups
+
+         s = self.lin(x).softmax(dim=-1)  # [N, G]
+         out = s.unsqueeze(-1) * x.unsqueeze(-2)  # [N, G, F]
+         out = self.norm(out.view(-1, G * F)).view(-1, G, F).sum(-2)  # [N, F]
+
+         return x + self.lamda * out
+
+     @staticmethod
+     def group_distance_ratio(x: Tensor, y: Tensor, eps: float = 1e-5) -> float:
+         r"""Measures the ratio of inter-group distance over intra-group
+         distance
+
+         .. math::
+             R_{\text{Group}} = \frac{\frac{1}{(C-1)^2} \sum_{i!=j}
+             \frac{1}{|\mathbf{X}_i||\mathbf{X}_j|} \sum_{\mathbf{x}_{iv}
+             \in \mathbf{X}_i } \sum_{\mathbf{x}_{jv^{\prime}} \in \mathbf{X}_j}
+             {\| \mathbf{x}_{iv} - \mathbf{x}_{jv^{\prime}} \|}_2 }{
+             \frac{1}{C} \sum_{i} \frac{1}{{|\mathbf{X}_i|}^2}
+             \sum_{\mathbf{x}_{iv}, \mathbf{x}_{iv^{\prime}} \in \mathbf{X}_i }
+             {\| \mathbf{x}_{iv} - \mathbf{x}_{iv^{\prime}} \|}_2 }
+
+         where :math:`\mathbf{X}_i` denotes the set of all nodes that belong to
+         class :math:`i`, and :math:`C` denotes the total number of classes in
+         :obj:`y`.
+         """
+         num_classes = int(y.max()) + 1
+
+         numerator = 0.
+         for i in range(num_classes):
+             mask = y == i
+             dist = torch.cdist(x[mask].unsqueeze(0), x[~mask].unsqueeze(0))
+             numerator += (1 / dist.numel()) * float(dist.sum())
+         numerator *= 1 / (num_classes - 1)**2
+
+         denominator = 0.
+         for i in range(num_classes):
+             mask = y == i
+             dist = torch.cdist(x[mask].unsqueeze(0), x[mask].unsqueeze(0))
+             denominator += (1 / dist.numel()) * float(dist.sum())
+         denominator *= 1 / num_classes
+
+         return numerator / (denominator + eps)
+
+     def __repr__(self):
+         return '{}({}, groups={})'.format(self.__class__.__name__,
+                                           self.in_channels, self.groups)
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ import torch
4
+ from torch import Tensor
5
+ from torch_scatter import scatter_mean
6
+
7
+ from torch_geometric.nn.inits import zeros, ones
8
+
9
+
10
+ class GraphNorm(torch.nn.Module):
11
+ r"""Applies graph normalization over individual graphs as described in the
12
+ `"GraphNorm: A Principled Approach to Accelerating Graph Neural Network
13
+ Training" <https://arxiv.org/abs/2009.03294>`_ paper
14
+
15
+ .. math::
16
+ \mathbf{x}^{\prime}_i = \frac{\mathbf{x} - \alpha \odot
17
+ \textrm{E}[\mathbf{x}]}
18
+ {\sqrt{\textrm{Var}[\mathbf{x} - \alpha \odot \textrm{E}[\mathbf{x}]]
19
+ + \epsilon}} \odot \gamma + \beta
20
+
21
+ where :math:`\alpha` denotes parameters that learn how much information
22
+ to keep in the mean.
23
+
24
+ Args:
25
+ in_channels (int): Size of each input sample.
26
+ eps (float, optional): A value added to the denominator for numerical
27
+ stability. (default: :obj:`1e-5`)
28
+ """
29
+ def __init__(self, in_channels: int, eps: float = 1e-5):
30
+ super(GraphNorm, self).__init__()
31
+
32
+ self.in_channels = in_channels
33
+ self.eps = eps
34
+
35
+ self.weight = torch.nn.Parameter(torch.Tensor(in_channels))
36
+ self.bias = torch.nn.Parameter(torch.Tensor(in_channels))
37
+ self.mean_scale = torch.nn.Parameter(torch.Tensor(in_channels))
38
+
39
+ self.reset_parameters()
40
+
41
+ def reset_parameters(self):
42
+ ones(self.weight)
43
+ zeros(self.bias)
44
+ ones(self.mean_scale)
45
+
46
+ def forward(self, x: Tensor, batch: Optional[Tensor] = None) -> Tensor:
47
+ """"""
48
+ if batch is None:
49
+ batch = x.new_zeros(x.size(0), dtype=torch.long)
50
+
51
+ batch_size = int(batch.max()) + 1
52
+
53
+ mean = scatter_mean(x, batch, dim=0, dim_size=batch_size)[batch]
54
+ out = x - mean * self.mean_scale
55
+ var = scatter_mean(out.pow(2), batch, dim=0, dim_size=batch_size)
56
+ std = (var + self.eps).sqrt()[batch]
57
+ return self.weight * out / std + self.bias
58
+
59
+ def __repr__(self):
60
+ return f'{self.__class__.__name__}({self.in_channels})'
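A minimal usage sketch: the `batch` vector assigns each node to a graph, and mean/variance are computed per graph.

    import torch

    gn = GraphNorm(in_channels=16)
    x = torch.randn(7, 16)                       # 7 nodes in total
    batch = torch.tensor([0, 0, 0, 1, 1, 1, 1])  # graph 0 has 3 nodes, graph 1 has 4
    print(gn(x, batch).shape)                    # torch.Size([7, 16])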
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_PyG_future/license.txt ADDED
@@ -0,0 +1,22 @@
+ The code in this folder was obtained from "https://github.com/rusty1s/pytorch_geometric", which has the following license:
+
+
+ Copyright (c) 2020 Matthias Fey <matthias.fey@tu-dortmund.de>
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/__init__.py ADDED
@@ -0,0 +1 @@
+ from .lattice import find_neighbors, _one_to_three, _compute_cube_index, _three_to_one
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (290 Bytes).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/__pycache__/lattice.cpython-312.pyc ADDED
Binary file (3.65 kB).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/lattice.py ADDED
@@ -0,0 +1,71 @@
+ import itertools
+ import numpy as np
+
+
+ # The following internal methods are used in the get_points_in_sphere method.
+ def _compute_cube_index(coords: np.ndarray, global_min: float, radius: float
+                         ) -> np.ndarray:
+     """
+     Compute the cube index from coordinates
+     Args:
+         coords: (nx3 array) atom coordinates
+         global_min: (float) lower boundary of coordinates
+         radius: (float) cutoff radius
+
+     Returns: (nx3 array) int indices
+     """
+     return np.array(np.floor((coords - global_min) / radius), dtype=int)
+
+
+ def _three_to_one(label3d: np.ndarray, ny: int, nz: int) -> np.ndarray:
+     """
+     The reverse of _one_to_three
+     """
+     return np.array(label3d[:, 0] * ny * nz +
+                     label3d[:, 1] * nz + label3d[:, 2]).reshape((-1, 1))
+
+
+ def _one_to_three(label1d: np.ndarray, ny: int, nz: int) -> np.ndarray:
+     """
+     Convert a 1D index array to a 3D index array
+
+     Args:
+         label1d: (array) 1D index array
+         ny: (int) number of cells in y direction
+         nz: (int) number of cells in z direction
+
+     Returns: (nx3) int array of indices
+     """
+     last = np.mod(label1d, nz)
+     second = np.mod((label1d - last) / nz, ny)
+     first = (label1d - last - second * nz) / (ny * nz)
+     return np.concatenate([first, second, last], axis=1)
+
+
+ def find_neighbors(label: np.ndarray, nx: int, ny: int, nz: int):
+     """
+     Given a cube index, find the neighbor cube indices
+
+     Args:
+         label: (array) (n,) or (n x 3) index array
+         nx: (int) number of cells in x direction
+         ny: (int) number of cells in y direction
+         nz: (int) number of cells in z direction
+
+     Returns: neighbor cell indices
+     """
+
+     array = [[-1, 0, 1]] * 3
+     neighbor_vectors = np.array(list(itertools.product(*array)),
+                                 dtype=int)
+     if np.shape(label)[1] == 1:
+         label3d = _one_to_three(label, ny, nz)
+     else:
+         label3d = label
+     all_labels = label3d[:, None, :] - neighbor_vectors[None, :, :]
+     filtered_labels = []
+     # filter out out-of-bound labels, i.e., label < 0
+     for labels in all_labels:
+         ind = (labels[:, 0] < nx) * (labels[:, 1] < ny) * (labels[:, 2] < nz) * np.all(labels > -1e-5, axis=1)
+         filtered_labels.append(labels[ind])
+     return filtered_labels
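A sketch of the cube-binning helpers above: coordinates are bucketed into cubes of side `radius`, and `find_neighbors` returns, for each cube, the surrounding 3 x 3 x 3 block of cube indices with out-of-bound labels filtered away.

    import numpy as np

    coords = np.array([[0.1, 0.2, 0.3],
                       [2.5, 2.6, 2.7]])
    cube = _compute_cube_index(coords, global_min=0.0, radius=1.0)
    print(cube.tolist())             # [[0, 0, 0], [2, 2, 2]]

    nbrs = find_neighbors(cube, nx=3, ny=3, nz=3)
    print(len(nbrs), nbrs[0].shape)  # 2 (8, 3): a corner cube keeps 8 of 27 neighbors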
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_pymatgen/license.txt ADDED
@@ -0,0 +1,22 @@
+ The code in this folder was obtained from "https://github.com/materialsproject/pymatgen", which has the following license:
+
+
+ The MIT License (MIT)
+ Copyright (c) 2011-2012 MIT & The Regents of the University of California, through Lawrence Berkeley National Laboratory
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the "Software"), to deal in
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ the Software, and to permit persons to whom the Software is furnished to do so,
+ subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/__init__.py ADDED
@@ -0,0 +1 @@
+ from .acsf import GaussianBasis
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (207 Bytes).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/__pycache__/acsf.cpython-312.pyc ADDED
Binary file (2.45 kB).
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/acsf.py ADDED
@@ -0,0 +1,50 @@
+ import torch
+ from torch import nn
+
+
+ def gaussian_smearing(distances, offset, widths, centered=False):
+     if not centered:
+         # compute width of Gaussian functions (using an overlap of 1 STDDEV)
+         coeff = -0.5 / torch.pow(widths, 2)
+         # use advanced indexing to compute the individual components
+         diff = distances[..., None] - offset
+     else:
+         # if Gaussian functions are centered, use offsets to compute widths
+         coeff = -0.5 / torch.pow(offset, 2)
+         # if Gaussian functions are centered, no offset is subtracted
+         diff = distances[..., None]
+     # compute smeared distance values
+     gauss = torch.exp(coeff * torch.pow(diff, 2))
+     return gauss
+
+
+ class GaussianBasis(nn.Module):
+     def __init__(
+         self, start=0.0, stop=5.0, n_gaussians=50, centered=False, trainable=False
+     ):
+         super(GaussianBasis, self).__init__()
+         # compute offset and width of Gaussian functions
+         offset = torch.linspace(start, stop, n_gaussians)
+         widths = torch.FloatTensor((offset[1] - offset[0]) * torch.ones_like(offset))
+         if trainable:
+             self.width = nn.Parameter(widths)
+             self.offsets = nn.Parameter(offset)
+         else:
+             self.register_buffer("width", widths)
+             self.register_buffer("offsets", offset)
+         self.centered = centered
+
+     def forward(self, distances):
+         """Compute smeared-Gaussian distance values.
+
+         Args:
+             distances (torch.Tensor): interatomic distance values of
+                 (N_b x N_at x N_nbh) shape.
+
+         Returns:
+             torch.Tensor: layer output of (N_b x N_at x N_nbh x N_g) shape.
+
+         """
+         return gaussian_smearing(
+             distances, self.offsets, self.width, centered=self.centered
+         )
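A minimal usage sketch, matching the config's `gauss_stop = 6.0`: each distance is smeared over `n_gaussians` evenly spaced Gaussian centers between `start` and `stop`.

    import torch

    basis = GaussianBasis(start=0.0, stop=6.0, n_gaussians=50)
    d = torch.tensor([[1.54, 2.52]])   # e.g. C-C distances in Angstrom
    print(basis(d).shape)              # torch.Size([1, 2, 50])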
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_schnetpack/license.txt ADDED
@@ -0,0 +1,35 @@
+ The code in this folder was obtained from "https://github.com/atomistic-machine-learning/schnetpack", which has the following license:
+
+
+ COPYRIGHT
+
+ Copyright (c) 2018 Kristof Schütt, Michael Gastegger, Pan Kessel, Kim Nicoli
+
+ All other contributions:
+ Copyright (c) 2018, the respective contributors.
+ All rights reserved.
+
+ Each contributor holds copyright over their respective contributions.
+ The project versioning (Git) records all such contribution source information.
+
+ LICENSE
+
+ The MIT License
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/__init__.py ADDED
@@ -0,0 +1 @@
+ from .representations import SphericalHarmonics
2_training/hamiltonian/infer_uc/dataset/00/pred_ham_std/src/deeph/from_se3_transformer/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (228 Bytes).