Datasets:
Tasks:
Translation
Modalities:
Text
Formats:
json
Sub-tasks:
text2text-generation
Size:
< 1K
License:
Commit ·
3cc2acc
0
Parent(s):
Reset history: keep only eval split
Browse files- .README.md.un~ +0 -0
- .dataset_infos.yaml.un~ +0 -0
- .gitattributes +59 -0
- README.md +97 -0
- extra/prepare_jsonl.py +43 -0
- validation.jsonl +0 -0
.README.md.un~
ADDED
|
Binary file (946 Bytes). View file
|
|
|
.dataset_infos.yaml.un~
ADDED
|
Binary file (523 Bytes). View file
|
|
|
.gitattributes
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.mds filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
# Audio files - uncompressed
|
| 39 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
# Audio files - compressed
|
| 43 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 44 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 47 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 48 |
+
# Image files - uncompressed
|
| 49 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 50 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 51 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 52 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 53 |
+
# Image files - compressed
|
| 54 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 55 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 56 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 57 |
+
# Video files - compressed
|
| 58 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
+
*.webm filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
annotations_creators:
|
| 3 |
+
- expert-generated
|
| 4 |
+
language_creators:
|
| 5 |
+
- expert-generated
|
| 6 |
+
language:
|
| 7 |
+
- nb
|
| 8 |
+
- nn
|
| 9 |
+
license: cc-by-4.0
|
| 10 |
+
multilinguality:
|
| 11 |
+
- translation
|
| 12 |
+
pretty_name: Nynorsk Norm 200eval
|
| 13 |
+
size_categories:
|
| 14 |
+
- n<1K
|
| 15 |
+
source_datasets:
|
| 16 |
+
- original
|
| 17 |
+
task_categories:
|
| 18 |
+
- translation
|
| 19 |
+
task_ids:
|
| 20 |
+
- text2text-generation
|
| 21 |
+
---
|
| 22 |
+
|
| 23 |
+
# Nynorsk Norm 200eval
|
| 24 |
+
|
| 25 |
+
`nynorsk_norm_200eval` is a high-quality, small-scale parallel corpus comprising 200 Norwegian Bokmål–Nynorsk sentence pairs collected from official sources and public institutions. Each example includes:
|
| 26 |
+
|
| 27 |
+
- `nb`: Original sentence in Bokmål
|
| 28 |
+
- `nn_original`: Original Nynorsk sentence (typically an official translation)
|
| 29 |
+
- `nn_husnorm`: Sentence rewritten in Nynorsk following an internal *husnorm* (house style)
|
| 30 |
+
- `nb_source`, `nn_source`: Source URLs for the Bokmål and Nynorsk versions
|
| 31 |
+
|
| 32 |
+
## Benchmark
|
| 33 |
+
|
| 34 |
+
### Target: `nn_original` (official Nynorsk)
|
| 35 |
+
|
| 36 |
+
| Model | BLEU | chrF | Notes |
|
| 37 |
+
|------------------|------|------|---------------------|
|
| 38 |
+
| ExampleModel-v1 | XX.X | XX.X | Official translation baseline |
|
| 39 |
+
| ExampleModel-v2 | XX.X | XX.X | Fine-tuned on government data |
|
| 40 |
+
|
| 41 |
+
### Target: `nn_husnorm` (house style Nynorsk)
|
| 42 |
+
|
| 43 |
+
| Model | BLEU | chrF | Notes |
|
| 44 |
+
|------------------|------|------|---------------------------|
|
| 45 |
+
| ExampleModel-v1 | XX.X | XX.X | Without style normalization |
|
| 46 |
+
| ExampleModel-v2 | XX.X | XX.X | Fine-tuned on husnorm corpus |
|
| 47 |
+
|
| 48 |
+
> Will be replaced with real numbers as soon as we have performed the tests.
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
## Intended Uses
|
| 52 |
+
|
| 53 |
+
This dataset is intended for:
|
| 54 |
+
|
| 55 |
+
- Evaluation of Bokmål-to-Nynorsk translation systems
|
| 56 |
+
- Research on Nynorsk stylistic variation
|
| 57 |
+
- Style transfer and normalization experiments, especially in public communication
|
| 58 |
+
|
| 59 |
+
## Format
|
| 60 |
+
|
| 61 |
+
The dataset is provided as a UTF-8 encoded JSON Lines (`.jsonl`) file. Each line is a structured example:
|
| 62 |
+
```json
|
| 63 |
+
{
|
| 64 |
+
"nb": "Ledelsen ved NMBU disponerer i dag to elbiler som de kan bruke på møter i nærområdet.",
|
| 65 |
+
"nn_original": "Leiinga ved NMBU disponerer i dag to elbilar som dei kan bruka på møte i nærområdet.",
|
| 66 |
+
"nn_husnorm": "Leiinga ved NMBU disponerer i dag to elbilar som dei kan bruke på møte i nærområdet.",
|
| 67 |
+
"nb_source": "https://www.nmbu.no/om/miljoarbeidet/milj-rsrapport-2020",
|
| 68 |
+
"nn_source": "https://www.nmbu.no/om/miljoarbeidet/miljotiltak"
|
| 69 |
+
}
|
| 70 |
+
```
|
| 71 |
+
|
| 72 |
+
## Nynorsk house style ("husnorm")
|
| 73 |
+
|
| 74 |
+
The `nn_husnorm` field contains Nynorsk text rewritten according to an internal style guide.
|
| 75 |
+
This house norm favors Bokmål-near variants for improved consistency and accessibility in public texts.
|
| 76 |
+
|
| 77 |
+
General characteristics:
|
| 78 |
+
- Use of samsvarsbøying (agreement)
|
| 79 |
+
- Prefer active voice where natural
|
| 80 |
+
- Rewrite s-genitive using prepositions, compounds, or possessives (sin/si)
|
| 81 |
+
- Use e-infinitive: å komme (not å kome)
|
| 82 |
+
- Use bli, blei, blitt (not verte, vart, vorte)
|
| 83 |
+
- Pronouns: vi (not me); henne as object form (not ho)
|
| 84 |
+
- Adverbs: nå, da, derfor, fordi (not no, då, difor, av di)
|
| 85 |
+
- Noun plural forms:
|
| 86 |
+
- gjestar, nadar instead of gjester, nader
|
| 87 |
+
- elver, helger instead of elvar, helgar
|
| 88 |
+
- Bokmål-like variants of verbs and nouns:
|
| 89 |
+
- følge, givar, følgar over følgje, gjevar, følgjar
|
| 90 |
+
- Prefer forelesing, foreslå over førelesing, føreslå
|
| 91 |
+
- Use doubled consonants: komme, lønne (not kome, løne)
|
| 92 |
+
- Lexical choices like nødvendig, allmente are accepted
|
| 93 |
+
|
| 94 |
+
## Authors
|
| 95 |
+
|
| 96 |
+
- Husnorm adaptation and linguistic quality control: **Arne Martinus Lidstad** and **Marie Røsok**
|
| 97 |
+
- Dataset formatting and Hugging Face integration: **Per Egil Kummervold**
|
extra/prepare_jsonl.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import json
|
| 4 |
+
import argparse
|
| 5 |
+
|
| 6 |
+
def convert_csv_to_jsonl(input_file, output_file):
    """Convert a semicolon-delimited CSV of translation pairs to JSON Lines.

    Reads *input_file* (UTF-8, ';'-delimited), drops unnamed/empty columns,
    normalizes column names to the published dataset schema
    (``nb``, ``nn_original``, ``nn_husnorm``, ``nb_source``, ``nn_source``),
    and writes one JSON object per line to *output_file*.

    Args:
        input_file: Path to the input CSV file.
        output_file: Path to the JSONL file to create (overwritten if present).

    Raises:
        ValueError: If any required column is missing after renaming.
    """
    # Read with semicolon delimiter.
    df = pd.read_csv(input_file, delimiter=";", encoding="utf-8")

    # Drop empty/unnamed columns (e.g. produced by trailing semicolons).
    df = df.drop(
        columns=[col for col in df.columns if "Unnamed" in col or col.strip() == ""],
        errors="ignore",
    )

    # Strip leading/trailing spaces from column names.
    df.columns = df.columns.str.strip()

    # Rename source columns to the desired schema.
    df = df.rename(columns={
        "nob": "nb",
        "nno": "nn_original",
        "nb_nno": "nn_husnorm",
        "nob_url": "nb_source",
        "nno_url": "nn_source",
    })

    # Validate required fields.
    required = ["nb", "nn_original", "nn_husnorm"]
    missing = [col for col in required if col not in df.columns]
    if missing:
        raise ValueError(f"Missing required columns: {missing}")

    # Bug fix: empty CSV cells become float NaN in pandas, and json.dump
    # serializes them as the bare token `NaN`, which is NOT valid JSON.
    # Convert NaN to None so the output contains proper JSON `null`.
    df = df.astype(object).where(pd.notnull(df), None)

    # Write JSONL output: one UTF-8 JSON object per line, unescaped non-ASCII.
    with open(output_file, "w", encoding="utf-8") as out_file:
        for row in df.to_dict(orient="records"):
            json.dump(row, out_file, ensure_ascii=False)
            out_file.write("\n")
| 36 |
+
|
| 37 |
+
if __name__ == "__main__":
    # Command-line entry point: convert a single CSV file to JSONL.
    arg_parser = argparse.ArgumentParser(
        description="Convert CSV to JSONL for nynorsk_norm_200eval"
    )
    arg_parser.add_argument(
        "--input_file", required=True, help="Path to input CSV file"
    )
    arg_parser.add_argument(
        "--output_file", required=True, help="Path to output JSONL file"
    )
    cli_args = arg_parser.parse_args()
    convert_csv_to_jsonl(cli_args.input_file, cli_args.output_file)
|
validation.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|