Datasets:
Tasks:
Text Generation
Sub-tasks:
dialogue-modeling
Languages:
Russian
Size:
1M<n<10M
Tags:
conversations
License:
Commit
·
75a2ffb
verified
·
0
Parent(s):
Duplicate from inkoziev/Conversations
Browse files · Co-authored-by: ilya koziev <inkoziev@users.noreply.huggingface.co>
- .gitattributes +60 -0
- Conversations.py +68 -0
- README.md +112 -0
- conversations.jsonl.gz +3 -0
.gitattributes
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.mds filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
# Audio files - uncompressed
|
| 39 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
# Audio files - compressed
|
| 43 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 44 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 47 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 48 |
+
# Image files - uncompressed
|
| 49 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 50 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 51 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 52 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 53 |
+
# Image files - compressed
|
| 54 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 55 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 56 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 57 |
+
# Video files - compressed
|
| 58 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
+
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
conversations.jsonl.gz filter=lfs diff=lfs merge=lfs -text
|
Conversations.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import datasets
|
| 2 |
+
import os
|
| 3 |
+
from datasets import Dataset, DatasetDict
|
| 4 |
+
import gzip
|
| 5 |
+
import json
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# BibTeX entry users are asked to cite; kept in sync with the Citing
# section of README.md.
_CITATION = """\
@misc{Conversations,
author = {Ilya Koziev},
title = {Russian-Language Dialogues Dataset},
year = {2025},
publisher = {Hugging Face},
howpublished = {\\url{https://huggingface.co/datasets/inkoziev/Conversations}},
}
"""


# One-line summary surfaced through datasets.DatasetInfo(description=...).
_DESCRIPTION = """\
Russian-Language Dialogues Dataset
"""
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the Conversations dataset.

    Adds no options of its own; every keyword argument is forwarded
    unchanged to :class:`datasets.BuilderConfig`.
    """

    def __init__(self, **kwargs):
        # Plain pass-through; Python 3 zero-argument super().
        super().__init__(**kwargs)
class Conversations(datasets.GeneratorBasedBuilder):
    """Builder for the Russian-Language Dialogues Dataset.

    Downloads a single gzipped JSON-lines archive and exposes it as one
    ``train`` split with two string fields: ``domain`` and ``conversation``.
    """

    BUILDER_CONFIG_CLASS = DatasetConfig
    BUILDER_CONFIGS = [
        DatasetConfig(
            name="Conversations",
            # NOTE(review): "27.02.2025" is a date, not a MAJOR.MINOR.PATCH
            # version; datasets.Version happens to accept it, and it is kept
            # as-is because changing it would invalidate users' caches.
            version=datasets.Version("27.02.2025"),
            description=_DESCRIPTION,
        ),
    ]

    def _info(self):
        """Return the dataset metadata: schema, citation, description."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "domain": datasets.Value("string"),
                    "conversation": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archive and declare the single TRAIN split."""
        download_url = "https://huggingface.co/datasets/inkoziev/Conversations/resolve/main/conversations.jsonl.gz"
        path = dl_manager.download(download_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"datapath": path},
            )
        ]

    def _generate_examples(self, datapath):
        """Yield ``(key, example)`` pairs from the gzipped JSON-lines file.

        Only the fields declared in ``_info`` are emitted, so extra keys
        that may appear in raw records cannot break schema validation.
        """
        with gzip.open(datapath, "rt", encoding="utf-8") as f:
            for iline, line in enumerate(f):
                record = json.loads(line)
                yield iline, {
                    "domain": record["domain"],
                    "conversation": record["conversation"],
                }
README.md
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
annotations_creators: []
|
| 3 |
+
language:
|
| 4 |
+
- ru
|
| 5 |
+
language_creators:
|
| 6 |
+
- machine-generated
|
| 7 |
+
license:
|
| 8 |
+
- cc-by-nc-4.0
|
| 9 |
+
multilinguality:
|
| 10 |
+
- monolingual
|
| 11 |
+
pretty_name: 'Conversations'
|
| 12 |
+
size_categories:
|
| 13 |
+
- 1M<n<10M
|
| 14 |
+
source_datasets: []
|
| 15 |
+
tags:
|
| 16 |
+
- conversations
|
| 17 |
+
task_categories:
|
| 18 |
+
- text-generation
|
| 19 |
+
task_ids:
|
| 20 |
+
- dialogue-modeling
|
| 21 |
+
---
|
| 22 |
+
|
| 23 |
+
# Russian-Language Dialogues Dataset (RLDD)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
## Dataset Overview
|
| 27 |
+
|
| 28 |
+
This dataset contains approximately 9 million Russian-language dialogues collected from diverse sources, including:
|
| 29 |
+
|
| 30 |
+
- [Jokes](https://huggingface.co/datasets/inkoziev/jokes_dialogues)
|
| 31 |
+
- [Literary works](https://github.com/Koziev/NLP_Datasets?tab=readme-ov-file#%D0%B4%D0%B8%D0%B0%D0%BB%D0%BE%D0%B3%D0%B8-%D0%B8-%D0%BE%D0%B1%D0%BC%D0%B5%D0%BD%D1%8B-%D1%80%D0%B5%D0%BF%D0%BB%D0%B8%D0%BA%D0%B0%D0%BC%D0%B8)
|
| 32 |
+
- Online forums
|
| 33 |
+
- [A small portion of synthetic dialogues](https://huggingface.co/datasets/inkoziev/arithmetic)
|
| 34 |
+
|
| 35 |
+
Each dialogue is tagged with a generalized source identifier, enabling targeted filtering. The dataset has undergone basic preprocessing, including deduplication, cleaning, typographical normalization, and minor typo correction.
|
| 36 |
+
|
| 37 |
+
Sample dialogue:
|
| 38 |
+
|
| 39 |
+
```
|
| 40 |
+
- Вы уже кому-нибудь читали свои стихи?
|
| 41 |
+
- Нет, а что?
|
| 42 |
+
- Просто у Вас глаз подбит.
|
| 43 |
+
```
|
| 44 |
+
|
| 45 |
+
Each conversation turn starts with an em dash (—) symbol.
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
## Intended Use
|
| 49 |
+
|
| 50 |
+
This dataset is designed for:
|
| 51 |
+
|
| 52 |
+
- Statistical research in linguistics
|
| 53 |
+
|
| 54 |
+
- Supplementary training data for pretraining small language models
|
| 55 |
+
|
| 56 |
+
- Experiments in conversational AI and NLP tasks
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
## Usage
|
| 60 |
+
|
| 61 |
+
```python
|
| 62 |
+
from datasets import load_dataset
|
| 63 |
+
|
| 64 |
+
for sample in load_dataset("inkoziev/Conversations", split="train", trust_remote_code=True, streaming=True):
|
| 65 |
+
print(sample['conversation'])
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
## Citing
|
| 70 |
+
|
| 71 |
+
If you use this dataset in your research or projects, please cite it as follows:
|
| 72 |
+
|
| 73 |
+
```bibtex
|
| 74 |
+
@misc{Conversations,
|
| 75 |
+
author = {Ilya Koziev},
|
| 76 |
+
title = {Russian-Language Dialogues Dataset},
|
| 77 |
+
year = {2025},
|
| 78 |
+
publisher = {Hugging Face},
|
| 79 |
+
howpublished = {\url{https://huggingface.co/datasets/inkoziev/Conversations}},
|
| 80 |
+
}
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
## License
|
| 84 |
+
|
| 85 |
+
This dataset is licensed under the [CC-BY-NC-4.0](https://creativecommons.org/licenses/by-nc/4.0/) license, which permits non-commercial use only.
|
| 86 |
+
For commercial use, please contact the author at [inkoziev@gmail.com].
|
| 87 |
+
|
| 88 |
+
By using this dataset, you agree to:
|
| 89 |
+
- Provide proper attribution to the author.
|
| 90 |
+
- Refrain from using the dataset for commercial purposes without explicit permission.
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
## Limitations
|
| 94 |
+
|
| 95 |
+
- **Automated Processing**: The dataset was processed automatically with only selective manual checks. As a result, some dialogues may contain errors such as neighboring lines incorrectly split between participants.
|
| 96 |
+
|
| 97 |
+
- **Uncorrected Errors**: Grammatical errors, typos, and spelling mistakes present in the original sources have been preserved. These may impact the performance of language models trained on this dataset.
|
| 98 |
+
|
| 99 |
+
- **Data Quality Variability**: Due to the diverse sources of the dialogues, the quality and consistency of the data may vary significantly.
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
## Ethical considerations
|
| 103 |
+
|
| 104 |
+
The dataset includes dialogues sourced from internet forums, which may contain:
|
| 105 |
+
|
| 106 |
+
- **Profanity and offensive language**
|
| 107 |
+
|
| 108 |
+
- **Insults or derogatory remarks**
|
| 109 |
+
|
| 110 |
+
- **Misinformation or fake facts**
|
| 111 |
+
|
| 112 |
+
Users of this dataset should be aware of these potential issues and take appropriate measures to filter or handle such content responsibly, especially when deploying models trained on this data in real-world applications.
|
conversations.jsonl.gz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ea041fa58f4faf4f79d627eaac8508eb2bba3f06187002709112e745c2ed876d
|
| 3 |
+
size 1574505230
|