Add RadonDarkUltima framework (5TB model - weights pending)
Changed files:

- .gitattributes (+5, -35)
- README.md (+161, -22)
- config.json (+79, -23)
- model.safetensors.index.json (added)
- model_info.json (+15, -0)
- sharding_info.json (added)
.gitattributes
CHANGED
```diff
@@ -1,35 +1,5 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
```
README.md
CHANGED
@@ -1,40 +1,179 @@

Removed: the previous minimal README, which consisted of front matter (license `apache-2.0`; tags including `dark-ultima`, `5tb`, `experimental`), the title "# RadonDarkUltima (5TB)", and the following notes (translated from Russian):

- **CONFIG ONLY** - weights not included
- Requires at least 5TB of VRAM
- Experimental version
- Not recommended for production

a usage snippet:

```python
# WARNING: requires 5TB+ VRAM!
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("MagistrTheOne/RadonDarkUltima")
tokenizer = AutoTokenizer.from_pretrained("MagistrTheOne/RadonDarkUltima")
```

and hardware notes:

- GPU: 5TB+ VRAM (RTX 4090 x4 or equivalent)
- RAM: 10TB+
- Disk: 10TB+ of free space

Added (the new README in full):
---
license: apache-2.0
language:
- ru
- en
- multilingual
tags:
- mistral
- russian
- english
- code
- machine-learning
- nlp
- transformer
- gqa
- rmsnorm
- swiglu
- rope
- flash-attention-2
- dark-ultima
- 5tb
- ultra-large
- experimental
- sharded
pipeline_tag: text-generation
size_categories: 5TB
---

# RadonDarkUltima (5TB) - Ultra-Large Scale Model

## Model Description

RadonDarkUltima is an experimental ultra-large-scale Mistral-based transformer with **2.5T parameters (~5TB in FP16)**, designed for cutting-edge research and development. This model represents the pinnacle of the RADON ecosystem, pushing the boundaries of what's possible with open-source language models.

### ⚠️ **EXPERIMENTAL MODEL - RESEARCH USE ONLY**

This model is at an experimental stage and requires massive computational resources. The framework is prepared, but the actual weights will be uploaded separately.

## Key Features

- **Parameters**: **2.5T** (2,500,000,000,000)
- **Architecture**: Mistral with Llama 3 innovations (GQA, RMSNorm, SwiGLU, RoPE)
- **Context Length**: **32,768 tokens** (32K)
- **Languages**: Russian, English, Code, Multilingual
- **Sharding**: 100 shards of ~50GB each
- **Quantization**: FP16 + INT8 hybrid for memory efficiency

## Technical Specifications

- **Hidden Size**: 16,384
- **Layers**: 200
- **Attention Heads**: 128
- **KV Heads**: 16 (GQA ratio 8:1)
- **Intermediate Size**: 65,536
- **Vocabulary**: 256,000 tokens
- **Memory**: ~5TB (FP16)
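As a quick sanity check, the sizing above is self-consistent: 2.5T parameters at two bytes each (FP16) is ~5TB, which at ~50GB per shard yields the 100 shards used throughout this repo. A minimal sketch of the arithmetic:

```python
# Back-of-the-envelope check of the spec table above (illustrative only).
PARAMS = 2_500_000_000_000   # 2.5T parameters, as claimed in Key Features
BYTES_PER_PARAM_FP16 = 2     # float16

total_bytes = PARAMS * BYTES_PER_PARAM_FP16
total_tb = total_bytes / 1e12            # ~5.0 TB of raw weights
shards = total_bytes / (50 * 1e9)        # at ~50 GB per shard -> ~100 shards

print(f"{total_tb:.1f} TB, ~{shards:.0f} shards")  # 5.0 TB, ~100 shards
```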
## Hardware Requirements

### Minimum Requirements

- **GPU**: 5TB+ VRAM total (A100 x64+ or H100 x32+)
- **RAM**: 10TB+ system memory
- **Storage**: 15TB+ NVMe SSD
- **Network**: high-speed connection for shard loading

### Recommended Setup

- **GPU**: 10TB+ VRAM total (H100 x64+ or equivalent)
- **RAM**: 20TB+ system memory
- **Storage**: 20TB+ NVMe SSD
- **Infrastructure**: data center with high-speed networking
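The GPU counts above follow from dividing weight memory by per-device VRAM. A rough planning helper, assuming 80GB devices and ignoring activation and KV-cache overhead (so real deployments need headroom):

```python
import math

def gpus_needed(model_bytes: float, vram_per_gpu_gb: int = 80) -> int:
    """Minimum device count to hold the weights alone (no activations or KV cache)."""
    return math.ceil(model_bytes / (vram_per_gpu_gb * 1e9))

print(gpus_needed(5e12))    # 63 -> rounds up to the "A100 x64+" figure above
print(gpus_needed(2.5e12))  # 32 -> the "H100 x32+" figure, if the INT8 hybrid halves weight memory
```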
## Sharding Strategy

The model is split into 100 shards for efficient loading:

- **Shard 1**: embeddings (256,000 x 16,384)
- **Shards 2-99**: transformer layers (200 layers distributed)
- **Shard 100**: final layer norm + LM head

Each shard is approximately 50GB in size; a sketch of the resulting index layout follows below.
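The `model.safetensors.index.json` added in this commit maps every tensor name to its shard file. Here is a sketch of how such a layer-wise index could be generated; the shard file names and tensor names follow common Hugging Face/Mistral conventions and are assumptions, not a dump of the actual index:

```python
import json

NUM_LAYERS, NUM_SHARDS = 200, 100

def shard_file(i: int) -> str:
    # Conventional Hugging Face shard naming; an assumption, not read from the repo.
    return f"model-{i:05d}-of-{NUM_SHARDS:05d}.safetensors"

# Shard 1: embeddings; shards 2-99: the 200 layers spread evenly; shard 100: norm + LM head.
weight_map = {"model.embed_tokens.weight": shard_file(1)}
for layer in range(NUM_LAYERS):
    idx = 2 + (layer * 98) // NUM_LAYERS   # maps layers 0..199 onto shards 2..99
    for tensor in ("self_attn.q_proj", "self_attn.k_proj", "self_attn.v_proj",
                   "self_attn.o_proj", "mlp.gate_proj", "mlp.up_proj",
                   "mlp.down_proj", "input_layernorm", "post_attention_layernorm"):
        weight_map[f"model.layers.{layer}.{tensor}.weight"] = shard_file(idx)
weight_map["model.norm.weight"] = shard_file(NUM_SHARDS)
weight_map["lm_head.weight"] = shard_file(NUM_SHARDS)

index = {"metadata": {"total_size": 5 * 10**12}, "weight_map": weight_map}
with open("model.safetensors.index.json", "w") as f:
    json.dump(index, f, indent=2)
```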
## Usage (Framework Only)

⚠️ **Note**: This repository contains only the model framework. The actual weights will be uploaded separately.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the model framework (weights not included yet)
model = AutoModelForCausalLM.from_pretrained(
    "MagistrTheOne/RadonDarkUltima",
    torch_dtype=torch.float16,
    device_map="auto",
    low_cpu_mem_usage=True,
)

tokenizer = AutoTokenizer.from_pretrained("MagistrTheOne/RadonDarkUltima")

# Generate text (requires the actual weights)
prompt = "Привет! Как дела?"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_length=100,
    do_sample=True,   # temperature only takes effect when sampling is enabled
    temperature=0.7,
)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```
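Until the weights land, `generate` above cannot produce real output. One way to exercise the framework without any weights is to instantiate the skeleton on the meta device; a sketch assuming `accelerate` is installed (and note the config's `auto_map` may additionally require `trust_remote_code=True`):

```python
from accelerate import init_empty_weights
from transformers import AutoConfig, AutoModelForCausalLM

# Config only; no tensors are downloaded.
config = AutoConfig.from_pretrained("MagistrTheOne/RadonDarkUltima")

# Every parameter is created on the meta device, so no real memory is allocated.
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)

print(sum(p.numel() for p in model.parameters()))  # parameter count derived from shapes alone
```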
## Model Architecture

```
RadonDarkUltima (2.5T parameters, ~5TB FP16)
├── Mistral Base Architecture
├── Llama 3 Innovations
│   ├── Grouped Query Attention (GQA) - 8:1 ratio
│   ├── RMSNorm Layer Normalization
│   ├── SwiGLU Activation
│   └── Rotary Position Embeddings (RoPE)
├── Flash Attention 2
├── Gradient Checkpointing
├── Sharded Weights (100 shards)
├── FP16 + INT8 Hybrid Quantization
└── Ultra-Large Scale Optimization
```
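The 8:1 GQA ratio means the 128 query heads share 16 key/value heads, shrinking the KV projections and the KV cache by 8x relative to full multi-head attention. A shape-level sketch using this model's dimensions (illustrative, not the repo's code):

```python
import torch

hidden, n_heads, n_kv_heads = 16384, 128, 16
head_dim = hidden // n_heads          # 128
group = n_heads // n_kv_heads         # 8 query heads share each KV head

x = torch.randn(1, 4, hidden)         # (batch, seq, hidden)
q_proj = torch.nn.Linear(hidden, n_heads * head_dim, bias=False)
kv_dim = n_kv_heads * head_dim        # 2048: the KV projections are 8x narrower
k_proj = torch.nn.Linear(hidden, kv_dim, bias=False)

q = q_proj(x).view(1, 4, n_heads, head_dim)
k = k_proj(x).view(1, 4, n_kv_heads, head_dim)
k = k.repeat_interleave(group, dim=2)  # expand KV heads across their query groups
print(q.shape, k.shape)                # both torch.Size([1, 4, 128, 128])
```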
## Performance Expectations

This experimental model is designed for:

- **Ultra-long context processing** (32K+ tokens)
- **Advanced reasoning** and problem-solving
- **Multilingual understanding** (Russian, English, Code)
- **Research applications** requiring massive scale
- **Benchmarking** against the largest commercial models

## Limitations

- **Experimental**: not production-ready
- **Massive resources**: requires data-center infrastructure
- **Weights pending**: framework only; weights will be uploaded separately
- **Research use**: intended for research and development
- **High cost**: significant computational requirements

## Creator

**MagistrTheOne** - creator and lead developer of RADON

- Specialized in ultra-large-scale AI models
- Focused on Russian-English machine-learning applications
- Open-source AI advocate and researcher
- Creator of the RADON ecosystem

## Contact

- GitHub: [MagistrTheOne/Radon2BMistral](https://github.com/MagistrTheOne/Radon2BMistral)
- Hugging Face: [MagistrTheOne/RadonDarkUltima](https://huggingface.co/MagistrTheOne/RadonDarkUltima)
- Creator: [MagistrTheOne](https://github.com/MagistrTheOne)

## License

Apache 2.0 License

## Citation

```bibtex
@misc{radon-dark-ultima-2024,
  title={RadonDarkUltima: 5TB Ultra-Large Scale Mistral-based Transformer},
  author={MagistrTheOne},
  year={2024},
  url={https://huggingface.co/MagistrTheOne/RadonDarkUltima}
}
```

---

**Created with ❤️ by MagistrTheOne**
**Pushing the boundaries of open-source AI! 🚀**

## Warning

This is an experimental research model requiring massive computational resources. Use it responsibly and only for research purposes.
config.json
CHANGED
@@ -1,28 +1,84 @@

The previous 28-line config is replaced wholesale; only `"use_cache": true`, `"eos_token_id": 2`, and `"initializer_range": 0.02` carry over unchanged into the new version:
```json
{
  "model_name": "radon-dark-ultima",
  "model_type": "mistral",
  "hidden_size": 16384,
  "num_layers": 200,
  "num_attention_heads": 128,
  "num_kv_heads": 16,
  "intermediate_size": 65536,
  "vocab_size": 256000,
  "max_position_embeddings": 32768,
  "sliding_window": 16384,
  "rope_theta": 100000.0,
  "rms_norm_eps": 1e-06,
  "activation_function": "silu",
  "layer_norm_eps": 1e-06,
  "use_cache": true,
  "output_attentions": false,
  "output_hidden_states": false,
  "torch_dtype": "float16",
  "pad_token_id": 0,
  "eos_token_id": 2,
  "bos_token_id": 1,
  "unk_token_id": 3,
  "attention_dropout": 0.0,
  "hidden_dropout": 0.0,
  "initializer_range": 0.02,
  "use_flash_attention_2": true,
  "gradient_checkpointing": true,
  "tie_word_embeddings": false,
  "architectures": ["MistralForCausalLM"],
  "auto_map": {
    "AutoModelForCausalLM": "models.mistral_model.MistralForCausalLM"
  },
  "transformers_version": "4.36.0",
  "model_size": "5TB",
  "parameters": 2500000000000,
  "context_length": 32768,
  "languages": ["russian", "english", "code", "multilingual"],
  "optimizations": [
    "flash_attention_2",
    "gradient_checkpointing",
    "fp16",
    "int8_hybrid",
    "sharded_weights",
    "tensor_parallel",
    "pipeline_parallel",
    "expert_parallel"
  ],
  "performance": {
    "memory_efficient": true,
    "speed_optimized": true,
    "production_ready": false,
    "experimental": true,
    "ultra_large_scale": true
  },
  "sharding": {
    "enabled": true,
    "total_shards": 100,
    "shard_size_gb": 50,
    "strategy": "layer_wise",
    "quantization": "fp16_int8_hybrid"
  },
  "hardware_requirements": {
    "minimum_vram": "5TB",
    "recommended_vram": "10TB+",
    "minimum_ram": "10TB",
    "recommended_ram": "20TB+",
    "storage": "15TB+",
    "gpu_types": ["A100", "H100", "RTX 4090 x16+"]
  },
  "creator": "MagistrTheOne",
  "description": "RadonDarkUltima: 5TB parameter ultra-large scale Mistral-based Russian-English transformer. Experimental model requiring massive computational resources."
}
```
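Several of these fields are derived from one another; a small consistency check one could run against the file (the checks themselves are illustrative, not part of the repo):

```python
import json

with open("config.json") as f:
    cfg = json.load(f)

head_dim = cfg["hidden_size"] // cfg["num_attention_heads"]    # 16384 / 128 = 128
assert cfg["num_attention_heads"] % cfg["num_kv_heads"] == 0   # GQA needs an integer group size
gqa_ratio = cfg["num_attention_heads"] // cfg["num_kv_heads"]  # 8, the "8:1" in the README
assert cfg["sliding_window"] <= cfg["max_position_embeddings"]

print(head_dim, f"{gqa_ratio}:1", cfg["sharding"]["total_shards"])  # 128 8:1 100
```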
model.safetensors.index.json
ADDED
The diff for this file is too large to render. See raw diff.
model_info.json
ADDED
@@ -0,0 +1,15 @@

```json
{
  "model_name": "RadonDarkUltima",
  "size": "5TB",
  "parameters": 867388637184,
  "parameters_formatted": "0.87T",
  "architecture": "Mistral-based with Llama 3 innovations",
  "sharding": {
    "enabled": true,
    "total_shards": 100,
    "shard_size_gb": 50
  },
  "status": "framework_ready",
  "note": "Actual weights will be uploaded separately on high-end hardware",
  "creator": "MagistrTheOne"
}
```
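Note that the `parameters` value here (867,388,637,184, ~0.87T) differs from the 2.5T figure in `config.json`; `parameters_formatted` is simply that count scaled to trillions. A tiny helper reproducing the formatting (illustrative):

```python
def format_params(n: int) -> str:
    # 867_388_637_184 -> "0.87T"
    return f"{n / 1e12:.2f}T"

print(format_params(867_388_637_184))  # 0.87T
```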
sharding_info.json
ADDED
The diff for this file is too large to render. See raw diff.