Upload config.yaml with huggingface_hub
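The commit title says the file was pushed with huggingface_hub; for reference, a minimal sketch of how such an upload is typically done. The repo id and repo type below are placeholders, not taken from this commit.

    # Minimal sketch (not the exact command used for this commit):
    # push a local config.yaml to a Hub repo with huggingface_hub.
    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token from `huggingface-cli login` by default
    api.upload_file(
        path_or_fileobj="config.yaml",      # local file to upload
        path_in_repo="config.yaml",         # destination path inside the repo
        repo_id="your-username/your-repo",  # placeholder repo id
        repo_type="dataset",                # assumption; could also be "model"
        commit_message="Upload config.yaml with huggingface_hub",
    )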
config.yaml ADDED (+69 -0)
@@ -0,0 +1,69 @@
data_mix:
  (): dataset_construction.DataMix
  name: "code_30b"
  shuffle: false
  local_save_dir: data/
  load_from_local_save_dir: false
  compute_dataset_stats: true
  keep_separated_datasets_in_dataset_dict: false
  deduplicate_test_set: false
  ngram_path_for_extra_deduplication: null
  max_shard_size: "2GB"
  datasets:
    # Code
    - (): dataset_construction.DatasetConfig
      dataset_path: 'bigcode/starcoderdata'
      train_split: "train[:20%]"
      text_column: "content"
      dataset_kwargs:
        data_dir: python
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: 'bigcode/starcoderdata'
      train_split: "train[:20%]"
      text_column: "content"
      dataset_kwargs:
        data_dir: markdown
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: 'bigcode/starcoderdata'
      train_split: "train[:20%]"
      text_column: "content"
      dataset_kwargs:
        data_dir: jupyter-scripts-dedup-filtered
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: 'bigcode/starcoderdata'
      train_split: "train[:20%]"
      text_column: "content"
      dataset_kwargs:
        data_dir: jupyter-structured-clean-dedup
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: 'bigcode/starcoderdata'
      train_split: "train[:15%]"
      text_column: "content"
      dataset_kwargs:
        data_dir: json
      build_test_set_from_train: true
    - (): dataset_construction.DatasetConfig
      dataset_path: 'teven/code_contests'
      train_split: "train"
      # build_test_set_from_train: true
      build_test_set_from_train: false
      test_split: "valid"
      # needs to be debugged
      filtering_function:
        (): dataset_collection.code.deepmind_code_contest.CodeContestFilter
      preprocessing_function:
        (): dataset_collection.code.deepmind_code_contest.CodeContestMapper
    - (): dataset_construction.DatasetConfig
      dataset_path: 'vikp/pypi_clean'
      train_split: "train"
      build_test_set_from_train: true
      text_column: code
      id_column: path
    # we could add synthetic textbook datasets or code with explanations !

tokenizer: "mistralai/Mistral-7B-v0.1"
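The `():` keys appear to name Python callables (e.g. dataset_construction.DataMix, dataset_construction.DatasetConfig) that the consuming code instantiates with the sibling keys as keyword arguments; the actual loader is not part of this commit. A minimal sketch of that reading, assuming plain PyYAML parsing and a hypothetical resolver:

    # Hypothetical sketch of consuming this config; the real dataset_construction
    # loader is not shown in this commit, so the resolver below is an assumption.
    import importlib
    import yaml

    def instantiate(node):
        """Recursively turn mappings containing a '()' key into Python objects."""
        if isinstance(node, dict):
            node = {k: instantiate(v) for k, v in node.items()}
            if "()" in node:
                module_path, _, attr = node.pop("()").rpartition(".")
                cls = getattr(importlib.import_module(module_path), attr)
                return cls(**node)  # remaining keys become constructor kwargs
            return node
        if isinstance(node, list):
            return [instantiate(v) for v in node]
        return node

    with open("config.yaml") as f:
        cfg = yaml.safe_load(f)

    data_mix = instantiate(cfg["data_mix"])  # needs dataset_construction on PYTHONPATH
    print(cfg["tokenizer"])                  # "mistralai/Mistral-7B-v0.1"

Under that assumption, the top-level mapping becomes a DataMix whose `datasets` list holds one DatasetConfig per entry, and `tokenizer` stays a plain string naming the Hub tokenizer to pair with the mix.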