Datasets:
# Example: load the geometric-vocab dataset and prepare one "crystal" record.
# Should be able to just paste this into Colab and it'll work with no fuss.
#
# This is NOT for production use. It only demonstrates loading the repo,
# preparing a crystal, and stopping after the first item. For production,
# batch with workers, prefetch, and implement proper accel, pyring, or a
# combination of multi-GPU-capable systems.
import huggingface_hub  # noqa: F401 -- ensures the hub client is present in the env
from datasets import load_dataset

import numpy as np

# streaming=False downloads the split -> HF `datasets` currently cannot
# stream a split from disk, so the full split is materialized locally.
ds = load_dataset(
    "AbstractPhil/geometric-vocab",
    name="unicode_64d",
    split="train",
    streaming=False,
)

# Sentinel: if the split is somehow empty, we still print an (empty) dict
# rather than crashing.
test_crystal = {}
for item in ds:
    # `token`: raw string or character depending on need; for this unicode
    # config it is a single character.
    token = item["token"]
    # `crystal` arrives flattened -- reshape to 5 vertices x 64 dimensions.
    crystal = np.array(item["crystal"]).reshape(5, 64)
    # Cayley-Menger volume, used to calculate trajectory and delta to prevent
    # combination variants from overlapping.
    volume = item["volume"]
    test_crystal = {
        "token": token,
        "crystal": crystal,
        "volume": volume,
    }
    break

print("Test case;\n")
print(test_crystal)