{
  "name": "Summarization Fine-tuning Dataset",
  "description": "A dataset for fine-tuning small language models on summarization tasks",
  "format": "alpaca",
  "statistics": {
    "total_examples": 2000,
    "train_examples": 1600,
    "val_examples": 200,
    "test_examples": 200,
    "dataset_distribution": {
      "xsum": {
        "count": 2000,
        "percentage": 100.0
      }
    }
  },
  "configuration": {
    "max_tokens": 2000,
    "tokenizer": "gpt2",
    "seed": 42
  }
}