Ishaank18 commited on
Commit
7964002
·
verified ·
1 Parent(s): b98fd5e

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -57
  2. README.md +195 -0
  3. example_usage.py +81 -0
  4. metadata.json +402 -0
  5. test/base.parquet +3 -0
  6. test/bert_surprisal.parquet +3 -0
  7. test/character_arcs.parquet +3 -0
  8. test/emotional.parquet +3 -0
  9. test/gc_academic.parquet +3 -0
  10. test/gc_basic.parquet +3 -0
  11. test/gc_char_diversity.parquet +3 -0
  12. test/gc_concreteness.parquet +3 -0
  13. test/gc_dialogue.parquet +3 -0
  14. test/gc_discourse.parquet +3 -0
  15. test/gc_narrative.parquet +3 -0
  16. test/gc_polarity.parquet +3 -0
  17. test/gc_pos.parquet +3 -0
  18. test/gc_pronouns.parquet +3 -0
  19. test/gc_punctuation.parquet +3 -0
  20. test/gc_readability.parquet +3 -0
  21. test/gc_syntax.parquet +3 -0
  22. test/gc_temporal.parquet +3 -0
  23. test/ngram.parquet +3 -0
  24. test/ngram_surprisal.parquet +3 -0
  25. test/plot_shifts.parquet +3 -0
  26. test/rst.parquet +3 -0
  27. test/structure.parquet +3 -0
  28. test/surprisal.parquet +3 -0
  29. train/base.parquet +3 -0
  30. train/bert_surprisal.parquet +3 -0
  31. train/character_arcs.parquet +3 -0
  32. train/emotional.parquet +3 -0
  33. train/gc_academic.parquet +3 -0
  34. train/gc_basic.parquet +3 -0
  35. train/gc_char_diversity.parquet +3 -0
  36. train/gc_concreteness.parquet +3 -0
  37. train/gc_dialogue.parquet +3 -0
  38. train/gc_discourse.parquet +3 -0
  39. train/gc_narrative.parquet +3 -0
  40. train/gc_polarity.parquet +3 -0
  41. train/gc_pos.parquet +3 -0
  42. train/gc_pronouns.parquet +3 -0
  43. train/gc_punctuation.parquet +3 -0
  44. train/gc_readability.parquet +3 -0
  45. train/gc_syntax.parquet +3 -0
  46. train/gc_temporal.parquet +3 -0
  47. train/ngram.parquet +3 -0
  48. train/ngram_surprisal.parquet +3 -0
  49. train/plot_shifts.parquet +3 -0
  50. train/rst.parquet +3 -0
.gitattributes CHANGED
@@ -1,59 +1,3 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mds filter=lfs diff=lfs merge=lfs -text
13
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
- *.model filter=lfs diff=lfs merge=lfs -text
15
- *.msgpack filter=lfs diff=lfs merge=lfs -text
16
- *.npy filter=lfs diff=lfs merge=lfs -text
17
- *.npz filter=lfs diff=lfs merge=lfs -text
18
- *.onnx filter=lfs diff=lfs merge=lfs -text
19
- *.ot filter=lfs diff=lfs merge=lfs -text
20
  *.parquet filter=lfs diff=lfs merge=lfs -text
21
- *.pb filter=lfs diff=lfs merge=lfs -text
22
- *.pickle filter=lfs diff=lfs merge=lfs -text
23
- *.pkl filter=lfs diff=lfs merge=lfs -text
24
- *.pt filter=lfs diff=lfs merge=lfs -text
25
- *.pth filter=lfs diff=lfs merge=lfs -text
26
- *.rar filter=lfs diff=lfs merge=lfs -text
27
- *.safetensors filter=lfs diff=lfs merge=lfs -text
28
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
- *.tar.* filter=lfs diff=lfs merge=lfs -text
30
- *.tar filter=lfs diff=lfs merge=lfs -text
31
- *.tflite filter=lfs diff=lfs merge=lfs -text
32
- *.tgz filter=lfs diff=lfs merge=lfs -text
33
- *.wasm filter=lfs diff=lfs merge=lfs -text
34
- *.xz filter=lfs diff=lfs merge=lfs -text
35
  *.zip filter=lfs diff=lfs merge=lfs -text
36
- *.zst filter=lfs diff=lfs merge=lfs -text
37
- *tfevents* filter=lfs diff=lfs merge=lfs -text
38
- # Audio files - uncompressed
39
- *.pcm filter=lfs diff=lfs merge=lfs -text
40
- *.sam filter=lfs diff=lfs merge=lfs -text
41
- *.raw filter=lfs diff=lfs merge=lfs -text
42
- # Audio files - compressed
43
- *.aac filter=lfs diff=lfs merge=lfs -text
44
- *.flac filter=lfs diff=lfs merge=lfs -text
45
- *.mp3 filter=lfs diff=lfs merge=lfs -text
46
- *.ogg filter=lfs diff=lfs merge=lfs -text
47
- *.wav filter=lfs diff=lfs merge=lfs -text
48
- # Image files - uncompressed
49
- *.bmp filter=lfs diff=lfs merge=lfs -text
50
- *.gif filter=lfs diff=lfs merge=lfs -text
51
- *.png filter=lfs diff=lfs merge=lfs -text
52
- *.tiff filter=lfs diff=lfs merge=lfs -text
53
- # Image files - compressed
54
- *.jpg filter=lfs diff=lfs merge=lfs -text
55
- *.jpeg filter=lfs diff=lfs merge=lfs -text
56
- *.webp filter=lfs diff=lfs merge=lfs -text
57
- # Video files - compressed
58
- *.mp4 filter=lfs diff=lfs merge=lfs -text
59
- *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  *.parquet filter=lfs diff=lfs merge=lfs -text
2
+ *.json filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  *.zip filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: cc-by-4.0
3
+ task_categories:
4
+ - text-classification
5
+ tags:
6
+ - screenplay
7
+ - narrative
8
+ - salience
9
+ - linguistics
10
+ language:
11
+ - en
12
+ size_categories:
13
+ - 100K<n<1M
14
+ ---
15
+
16
+ # Screenplay Scene Salience Features
17
+
18
+ Pre-extracted linguistic and narrative features for screenplay scene salience detection from the MENSA dataset.
19
+
20
+ ## Dataset Description
21
+
22
+ This dataset contains **913 linguistic features** extracted from movie screenplays in the MENSA dataset. Features are organized into **24 feature groups** covering various aspects of linguistic, narrative, and discourse analysis.
23
+
24
+ ### Dataset Statistics
25
+
26
+ | Split | Samples | Size |
27
+ |-------|---------|------|
28
+ | Train | 117,503 | 119.5 MB |
29
+ | Validation | 8,052 | 10.3 MB |
30
+ | Test | 8,156 | 10.3 MB |
31
+ | **Total** | **133,711** | **140.1 MB** |
32
+
33
+ ### Feature Groups (24 groups)
34
+
35
+ - `base`
36
+ - `bert_surprisal`
37
+ - `character_arcs`
38
+ - `emotional`
39
+ - `gc_academic`
40
+ - `gc_basic`
41
+ - `gc_char_diversity`
42
+ - `gc_concreteness`
43
+ - `gc_dialogue`
44
+ - `gc_discourse`
45
+ - `gc_narrative`
46
+ - `gc_polarity`
47
+ - `gc_pos`
48
+ - `gc_pronouns`
49
+ - `gc_punctuation`
50
+ - `gc_readability`
51
+ - `gc_syntax`
52
+ - `gc_temporal`
53
+ - `ngram`
54
+ - `ngram_surprisal`
55
+ - `plot_shifts`
56
+ - `rst`
57
+ - `structure`
58
+ - `surprisal`
59
+
60
+ ## Usage
61
+
62
+ ### Option 1: Load with Hugging Face datasets (Recommended)
63
+
64
+ ```python
65
+ from datasets import load_dataset
66
+
67
+ # Load a single feature group
68
+ ds = load_dataset("YOUR_USERNAME/screenplay-features", data_files="train/base.parquet")
69
+ df = ds['train'].to_pandas()
70
+
71
+ # Load multiple groups for training
72
+ ds = load_dataset("YOUR_USERNAME/screenplay-features",
73
+ data_files={
74
+ "train": ["train/base.parquet", "train/gc_polarity.parquet", "train/emotional.parquet"]
75
+ })
76
+ df = ds['train'].to_pandas()
77
+
78
+ # Load all splits for evaluation
79
+ ds = load_dataset("YOUR_USERNAME/screenplay-features",
80
+ data_files={
81
+ "train": "train/gc_polarity.parquet",
82
+ "validation": "validation/gc_polarity.parquet",
83
+ "test": "test/gc_polarity.parquet"
84
+ })
85
+ ```
86
+
87
+ ### Option 2: Load with pandas directly
88
+
89
+ ```python
90
+ import pandas as pd
91
+
92
+ # From HuggingFace URL
93
+ df = pd.read_parquet("hf://datasets/YOUR_USERNAME/screenplay-features/train/base.parquet")
94
+
95
+ # Or if you have the repo cloned locally
96
+ df = pd.read_parquet("train/base.parquet")
97
+ ```
98
+
99
+ ### Option 3: Use custom loader (Easiest)
100
+
101
+ ```python
102
+ from feature_cache.load_hf import load_groups
103
+
104
+ # Load features and labels
105
+ X, y = load_groups(
106
+ groups=["base", "gc_polarity", "emotional", "rst"],
107
+ split="train",
108
+ hf_repo="YOUR_USERNAME/screenplay-features"
109
+ )
110
+
111
+ # Load features only (no labels)
112
+ X = load_groups(
113
+ groups=["base", "gc_polarity"],
114
+ split="test",
115
+ include_label=False,
116
+ hf_repo="YOUR_USERNAME/screenplay-features"
117
+ )
118
+ ```
119
+
120
+ ## Data Structure
121
+
122
+ Each parquet file contains:
123
+
124
+ - **`movie_id`** (string): Unique movie identifier
125
+ - **`scene_index`** (int): Scene index within the movie (0-indexed)
126
+ - **`label`** (int): Salience label
127
+ - `0` = Non-salient scene
128
+ - `1` = Salient scene
129
+ - **Feature columns**: Various linguistic/narrative features (float/int)
130
+
131
+ ### Example row structure:
132
+
133
+ | movie_id | scene_index | label | feature_1 | feature_2 | ... |
134
+ |----------|-------------|-------|-----------|-----------|-----|
135
+ | tt0111161 | 42 | 1 | 0.85 | 12.3 | ... |
136
+
137
+ ## Feature Categories
138
+
139
+ The features are organized into the following categories:
140
+
141
+ ### Base Features
142
+ - Basic linguistic statistics (token count, sentence count, etc.)
143
+ - Structural position features (act, scene positions)
144
+
145
+ ### GenreClassifier (GC) Features
146
+ - **gc_basic**: Basic linguistic metrics
147
+ - **gc_char_diversity**: Character diversity metrics
148
+ - **gc_concreteness**: Concreteness scores
149
+ - **gc_dialogue**: Dialogue-specific features
150
+ - **gc_discourse**: Discourse markers and connectives
151
+ - **gc_narrative**: Narrative structure features
152
+ - **gc_polarity**: Sentiment polarity scores
153
+ - **gc_pos**: Part-of-speech distributions
154
+ - **gc_pronouns**: Pronoun usage patterns
155
+ - **gc_punctuation**: Punctuation statistics
156
+ - **gc_readability**: Readability metrics
157
+ - **gc_syntax**: Syntactic complexity features
158
+ - **gc_temporal**: Temporal expressions
159
+
160
+ ### Narrative Features
161
+ - **character_arcs**: Character development metrics
162
+ - **plot_shifts**: Plot progression indicators
163
+ - **structure**: Narrative structure features
164
+ - **emotional**: Emotional arc features
165
+
166
+ ### Linguistic Features
167
+ - **ngram**: N-gram diversity metrics
168
+ - **rst**: Rhetorical Structure Theory features
169
+ - **bert_surprisal**: BERT-based surprisal scores
170
+ - **ngram_surprisal**: N-gram-based surprisal
171
+
172
+ ## Citation
173
+
174
+ If you use this dataset, please cite:
175
+
176
+ ```bibtex
177
+ @inproceedings{mensa2021,
178
+ title={MENSA: A Multi-Domain Dataset for Narrative Understanding},
179
+ author={...},
180
+ booktitle={...},
181
+ year={2021}
182
+ }
183
+ ```
184
+
185
+ ## License
186
+
187
+ CC-BY-4.0 (following MENSA dataset license)
188
+
189
+ ## Acknowledgments
190
+
191
+ Features extracted using:
192
+ - GenreClassifier toolkit
193
+ - spaCy for NLP processing
194
+ - BERT models for surprisal estimation
195
+ - RST parser for discourse analysis
example_usage.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Example usage of screenplay salience features from Hugging Face.
4
+ """
5
+
6
+ from datasets import load_dataset
7
+ import pandas as pd
8
+ from sklearn.linear_model import LogisticRegression
9
+ from sklearn.metrics import classification_report
10
+
11
+ # ============================================================================
12
+ # Method 1: Load with datasets library
13
+ # ============================================================================
14
+
15
+ print("Loading training data...")
16
+ ds = load_dataset(
17
+ "YOUR_USERNAME/screenplay-features",
18
+ data_files={
19
+ "train": ["train/base.parquet", "train/gc_polarity.parquet"],
20
+ "test": ["test/base.parquet", "test/gc_polarity.parquet"]
21
+ }
22
+ )
23
+
24
+ train_df = ds['train'].to_pandas()
25
+ test_df = ds['test'].to_pandas()
26
+
27
+ # Separate features and labels
28
+ feature_cols = [c for c in train_df.columns if c not in ["movie_id", "scene_index", "label"]]
29
+
30
+ X_train = train_df[feature_cols]
31
+ y_train = train_df["label"]
32
+
33
+ X_test = test_df[feature_cols]
34
+ y_test = test_df["label"]
35
+
36
+ print(f"Train: {len(X_train)} samples, {len(feature_cols)} features")
37
+ print(f"Test: {len(X_test)} samples")
38
+
39
+ # ============================================================================
40
+ # Method 2: Use custom loader (easier)
41
+ # ============================================================================
42
+
43
+ # If you have the feature_cache module:
44
+ # from feature_cache.load_hf import load_groups
45
+ #
46
+ # X_train, y_train = load_groups(
47
+ # groups=["base", "gc_polarity", "emotional"],
48
+ # split="train",
49
+ # hf_repo="YOUR_USERNAME/screenplay-features"
50
+ # )
51
+ #
52
+ # X_test, y_test = load_groups(
53
+ # groups=["base", "gc_polarity", "emotional"],
54
+ # split="test",
55
+ # hf_repo="YOUR_USERNAME/screenplay-features"
56
+ # )
57
+
58
+ # ============================================================================
59
+ # Train a simple model
60
+ # ============================================================================
61
+
62
+ print("\nTraining logistic regression...")
63
+ clf = LogisticRegression(max_iter=1000, random_state=42)
64
+ clf.fit(X_train.fillna(0), y_train)
65
+
66
+ # Evaluate
67
+ y_pred = clf.predict(X_test.fillna(0))
68
+ print("\nTest Results:")
69
+ print(classification_report(y_test, y_pred, target_names=["Non-salient", "Salient"]))
70
+
71
+ # ============================================================================
72
+ # Explore features
73
+ # ============================================================================
74
+
75
+ print("\nTop 10 features by coefficient:")
76
+ feature_importance = pd.DataFrame({
77
+ 'feature': feature_cols,
78
+ 'coefficient': clf.coef_[0]
79
+ }).sort_values('coefficient', key=abs, ascending=False)
80
+
81
+ print(feature_importance.head(10))
metadata.json ADDED
@@ -0,0 +1,402 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "splits": [
3
+ "train",
4
+ "validation",
5
+ "test"
6
+ ],
7
+ "groups": [
8
+ "base",
9
+ "bert_surprisal",
10
+ "character_arcs",
11
+ "emotional",
12
+ "gc_academic",
13
+ "gc_basic",
14
+ "gc_char_diversity",
15
+ "gc_concreteness",
16
+ "gc_dialogue",
17
+ "gc_discourse",
18
+ "gc_narrative",
19
+ "gc_polarity",
20
+ "gc_pos",
21
+ "gc_pronouns",
22
+ "gc_punctuation",
23
+ "gc_readability",
24
+ "gc_syntax",
25
+ "gc_temporal",
26
+ "ngram",
27
+ "ngram_surprisal",
28
+ "plot_shifts",
29
+ "rst",
30
+ "structure",
31
+ "surprisal"
32
+ ],
33
+ "stats": {
34
+ "train": {
35
+ "base": {
36
+ "n_samples": 117503,
37
+ "n_features": 21,
38
+ "size_mb": 7.204872131347656
39
+ },
40
+ "bert_surprisal": {
41
+ "n_samples": 117503,
42
+ "n_features": 29,
43
+ "size_mb": 15.838018417358398
44
+ },
45
+ "character_arcs": {
46
+ "n_samples": 117503,
47
+ "n_features": 4,
48
+ "size_mb": 0.20000839233398438
49
+ },
50
+ "emotional": {
51
+ "n_samples": 117503,
52
+ "n_features": 5,
53
+ "size_mb": 4.4886064529418945
54
+ },
55
+ "gc_academic": {
56
+ "n_samples": 117503,
57
+ "n_features": 8,
58
+ "size_mb": 0.6636857986450195
59
+ },
60
+ "gc_basic": {
61
+ "n_samples": 117503,
62
+ "n_features": 3,
63
+ "size_mb": 1.3420581817626953
64
+ },
65
+ "gc_char_diversity": {
66
+ "n_samples": 117503,
67
+ "n_features": 8,
68
+ "size_mb": 3.832453727722168
69
+ },
70
+ "gc_concreteness": {
71
+ "n_samples": 117503,
72
+ "n_features": 22,
73
+ "size_mb": 10.268058776855469
74
+ },
75
+ "gc_dialogue": {
76
+ "n_samples": 117503,
77
+ "n_features": 6,
78
+ "size_mb": 0.571375846862793
79
+ },
80
+ "gc_discourse": {
81
+ "n_samples": 117503,
82
+ "n_features": 7,
83
+ "size_mb": 1.0773344039916992
84
+ },
85
+ "gc_narrative": {
86
+ "n_samples": 117503,
87
+ "n_features": 10,
88
+ "size_mb": 0.8629426956176758
89
+ },
90
+ "gc_polarity": {
91
+ "n_samples": 117503,
92
+ "n_features": 23,
93
+ "size_mb": 19.75270366668701
94
+ },
95
+ "gc_pos": {
96
+ "n_samples": 117503,
97
+ "n_features": 8,
98
+ "size_mb": 1.6754398345947266
99
+ },
100
+ "gc_pronouns": {
101
+ "n_samples": 117503,
102
+ "n_features": 12,
103
+ "size_mb": 1.9187965393066406
104
+ },
105
+ "gc_punctuation": {
106
+ "n_samples": 117503,
107
+ "n_features": 8,
108
+ "size_mb": 0.7266168594360352
109
+ },
110
+ "gc_readability": {
111
+ "n_samples": 117503,
112
+ "n_features": 4,
113
+ "size_mb": 1.960618019104004
114
+ },
115
+ "gc_syntax": {
116
+ "n_samples": 117503,
117
+ "n_features": 575,
118
+ "size_mb": 30.994938850402832
119
+ },
120
+ "gc_temporal": {
121
+ "n_samples": 117503,
122
+ "n_features": 5,
123
+ "size_mb": 0.5964765548706055
124
+ },
125
+ "ngram": {
126
+ "n_samples": 117503,
127
+ "n_features": 9,
128
+ "size_mb": 1.9910192489624023
129
+ },
130
+ "ngram_surprisal": {
131
+ "n_samples": 117503,
132
+ "n_features": 86,
133
+ "size_mb": 1.2012948989868164
134
+ },
135
+ "plot_shifts": {
136
+ "n_samples": 117503,
137
+ "n_features": 3,
138
+ "size_mb": 1.5663232803344727
139
+ },
140
+ "rst": {
141
+ "n_samples": 117503,
142
+ "n_features": 46,
143
+ "size_mb": 3.908018112182617
144
+ },
145
+ "structure": {
146
+ "n_samples": 117503,
147
+ "n_features": 5,
148
+ "size_mb": 1.2650909423828125
149
+ },
150
+ "surprisal": {
151
+ "n_samples": 117503,
152
+ "n_features": 6,
153
+ "size_mb": 5.623771667480469
154
+ }
155
+ },
156
+ "validation": {
157
+ "base": {
158
+ "n_samples": 8052,
159
+ "n_features": 21,
160
+ "size_mb": 0.670506477355957
161
+ },
162
+ "bert_surprisal": {
163
+ "n_samples": 8052,
164
+ "n_features": 29,
165
+ "size_mb": 1.1183900833129883
166
+ },
167
+ "character_arcs": {
168
+ "n_samples": 8052,
169
+ "n_features": 4,
170
+ "size_mb": 0.0215606689453125
171
+ },
172
+ "emotional": {
173
+ "n_samples": 8052,
174
+ "n_features": 5,
175
+ "size_mb": 0.3020200729370117
176
+ },
177
+ "gc_academic": {
178
+ "n_samples": 8052,
179
+ "n_features": 8,
180
+ "size_mb": 0.07343864440917969
181
+ },
182
+ "gc_basic": {
183
+ "n_samples": 8052,
184
+ "n_features": 3,
185
+ "size_mb": 0.12804508209228516
186
+ },
187
+ "gc_char_diversity": {
188
+ "n_samples": 8052,
189
+ "n_features": 8,
190
+ "size_mb": 0.3675346374511719
191
+ },
192
+ "gc_concreteness": {
193
+ "n_samples": 8052,
194
+ "n_features": 22,
195
+ "size_mb": 0.806300163269043
196
+ },
197
+ "gc_dialogue": {
198
+ "n_samples": 8052,
199
+ "n_features": 6,
200
+ "size_mb": 0.056652069091796875
201
+ },
202
+ "gc_discourse": {
203
+ "n_samples": 8052,
204
+ "n_features": 7,
205
+ "size_mb": 0.10312652587890625
206
+ },
207
+ "gc_narrative": {
208
+ "n_samples": 8052,
209
+ "n_features": 10,
210
+ "size_mb": 0.08792686462402344
211
+ },
212
+ "gc_polarity": {
213
+ "n_samples": 8052,
214
+ "n_features": 23,
215
+ "size_mb": 1.337418556213379
216
+ },
217
+ "gc_pos": {
218
+ "n_samples": 8052,
219
+ "n_features": 8,
220
+ "size_mb": 0.1473827362060547
221
+ },
222
+ "gc_pronouns": {
223
+ "n_samples": 8052,
224
+ "n_features": 12,
225
+ "size_mb": 0.1820669174194336
226
+ },
227
+ "gc_punctuation": {
228
+ "n_samples": 8052,
229
+ "n_features": 8,
230
+ "size_mb": 0.07291126251220703
231
+ },
232
+ "gc_readability": {
233
+ "n_samples": 8052,
234
+ "n_features": 4,
235
+ "size_mb": 0.18763160705566406
236
+ },
237
+ "gc_syntax": {
238
+ "n_samples": 8052,
239
+ "n_features": 488,
240
+ "size_mb": 3.2519569396972656
241
+ },
242
+ "gc_temporal": {
243
+ "n_samples": 8052,
244
+ "n_features": 5,
245
+ "size_mb": 0.05423545837402344
246
+ },
247
+ "ngram": {
248
+ "n_samples": 8052,
249
+ "n_features": 9,
250
+ "size_mb": 0.18585777282714844
251
+ },
252
+ "ngram_surprisal": {
253
+ "n_samples": 8052,
254
+ "n_features": 86,
255
+ "size_mb": 0.14410018920898438
256
+ },
257
+ "plot_shifts": {
258
+ "n_samples": 8052,
259
+ "n_features": 3,
260
+ "size_mb": 0.13669204711914062
261
+ },
262
+ "rst": {
263
+ "n_samples": 8052,
264
+ "n_features": 46,
265
+ "size_mb": 0.3528108596801758
266
+ },
267
+ "structure": {
268
+ "n_samples": 8052,
269
+ "n_features": 5,
270
+ "size_mb": 0.159698486328125
271
+ },
272
+ "surprisal": {
273
+ "n_samples": 8052,
274
+ "n_features": 6,
275
+ "size_mb": 0.3733844757080078
276
+ }
277
+ },
278
+ "test": {
279
+ "base": {
280
+ "n_samples": 8156,
281
+ "n_features": 21,
282
+ "size_mb": 0.6687898635864258
283
+ },
284
+ "bert_surprisal": {
285
+ "n_samples": 8156,
286
+ "n_features": 29,
287
+ "size_mb": 1.1588869094848633
288
+ },
289
+ "character_arcs": {
290
+ "n_samples": 8156,
291
+ "n_features": 4,
292
+ "size_mb": 0.021600723266601562
293
+ },
294
+ "emotional": {
295
+ "n_samples": 8156,
296
+ "n_features": 5,
297
+ "size_mb": 0.3055391311645508
298
+ },
299
+ "gc_academic": {
300
+ "n_samples": 8156,
301
+ "n_features": 8,
302
+ "size_mb": 0.0720987319946289
303
+ },
304
+ "gc_basic": {
305
+ "n_samples": 8156,
306
+ "n_features": 3,
307
+ "size_mb": 0.12851428985595703
308
+ },
309
+ "gc_char_diversity": {
310
+ "n_samples": 8156,
311
+ "n_features": 8,
312
+ "size_mb": 0.36421966552734375
313
+ },
314
+ "gc_concreteness": {
315
+ "n_samples": 8156,
316
+ "n_features": 22,
317
+ "size_mb": 0.7973165512084961
318
+ },
319
+ "gc_dialogue": {
320
+ "n_samples": 8156,
321
+ "n_features": 6,
322
+ "size_mb": 0.05322837829589844
323
+ },
324
+ "gc_discourse": {
325
+ "n_samples": 8156,
326
+ "n_features": 7,
327
+ "size_mb": 0.101531982421875
328
+ },
329
+ "gc_narrative": {
330
+ "n_samples": 8156,
331
+ "n_features": 10,
332
+ "size_mb": 0.08516597747802734
333
+ },
334
+ "gc_polarity": {
335
+ "n_samples": 8156,
336
+ "n_features": 23,
337
+ "size_mb": 1.3533782958984375
338
+ },
339
+ "gc_pos": {
340
+ "n_samples": 8156,
341
+ "n_features": 8,
342
+ "size_mb": 0.14973068237304688
343
+ },
344
+ "gc_pronouns": {
345
+ "n_samples": 8156,
346
+ "n_features": 12,
347
+ "size_mb": 0.17980289459228516
348
+ },
349
+ "gc_punctuation": {
350
+ "n_samples": 8156,
351
+ "n_features": 8,
352
+ "size_mb": 0.07043266296386719
353
+ },
354
+ "gc_readability": {
355
+ "n_samples": 8156,
356
+ "n_features": 4,
357
+ "size_mb": 0.18674278259277344
358
+ },
359
+ "gc_syntax": {
360
+ "n_samples": 8156,
361
+ "n_features": 480,
362
+ "size_mb": 3.1574296951293945
363
+ },
364
+ "gc_temporal": {
365
+ "n_samples": 8156,
366
+ "n_features": 5,
367
+ "size_mb": 0.05355262756347656
368
+ },
369
+ "ngram": {
370
+ "n_samples": 8156,
371
+ "n_features": 9,
372
+ "size_mb": 0.18451881408691406
373
+ },
374
+ "ngram_surprisal": {
375
+ "n_samples": 8156,
376
+ "n_features": 86,
377
+ "size_mb": 0.1438894271850586
378
+ },
379
+ "plot_shifts": {
380
+ "n_samples": 8156,
381
+ "n_features": 3,
382
+ "size_mb": 0.13460922241210938
383
+ },
384
+ "rst": {
385
+ "n_samples": 8156,
386
+ "n_features": 46,
387
+ "size_mb": 0.3458251953125
388
+ },
389
+ "structure": {
390
+ "n_samples": 8156,
391
+ "n_features": 5,
392
+ "size_mb": 0.1639995574951172
393
+ },
394
+ "surprisal": {
395
+ "n_samples": 8156,
396
+ "n_features": 6,
397
+ "size_mb": 0.378387451171875
398
+ }
399
+ }
400
+ },
401
+ "total_size_mb": 140.1113634109497
402
+ }
test/base.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9c09d39cc3902b033776fb1001d0ca74b2d0dba6dcc08454d491ba6edabbb0c
3
+ size 701277
test/bert_surprisal.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:583161c4db863a86eaa3aa065a3661af90bafc8d5db40cbc4cff37810c4657f2
3
+ size 1215181
test/character_arcs.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:131d40ac3d9c05e256f68ec1d76734cdbf2115b3ca5ce04f7ebb15730c96c41f
3
+ size 22650
test/emotional.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b15ee8fecbb5790a861fde68066b2accbff43a92774ebdd4043d16b79c6f95c0
3
+ size 320381
test/gc_academic.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:387970cd7597148a64bee812335a36a5f670402f4ba4362ebb22c7e0277eab38
3
+ size 75601
test/gc_basic.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d692c38d5c9f65519b3dc0612c020970344c599e714a88bb072483b668c18661
3
+ size 134757
test/gc_char_diversity.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c545edea839c752e6013f22297b1f367c91bd08b30b8304b0333c40154ca8a97
3
+ size 381912
test/gc_concreteness.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a856d719dbf187f3380008cef7b870e52d1866a9fb6263445548b05724568813
3
+ size 836047
test/gc_dialogue.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:489203de9f055726829cf40d21f0bb685a56d1c486c53b25137d9b1b053e0494
3
+ size 55814
test/gc_discourse.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8114aa0aa2b5ac99dae8034344a4b7715bfe55f6ca9f7147e3181f819a78d2cf
3
+ size 106464
test/gc_narrative.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4869486bbe9c1f070408fbe7d52c7ddada7985143b1e7488a37b63d90ad35479
3
+ size 89303
test/gc_polarity.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4beac598517f9a10a15150b4c2a8b0ac17c2b40a74cbbe6cfcb5abb0fac999f3
3
+ size 1419120
test/gc_pos.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67679b1517eaf24dd20c326b47b215cb8ba315e26ff8ac3685492233ec5d34b0
3
+ size 157004
test/gc_pronouns.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84032fa3ea0e0c220663408c58654cecaf8df1af497a0422a0295c936132b15f
3
+ size 188537
test/gc_punctuation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef9754fc7b9bf7fc8e3f411ab419407da862421de7ab1a9381ba0533241f0ac2
3
+ size 73854
test/gc_readability.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aaf8a60352a4ddb6d27b86209ccd1adac37a654a94e6c05edb2f07d38799da2c
3
+ size 195814
test/gc_syntax.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8734ceeae3d23566b5384ab97a17ba92488cc89002c6493b912bea7d3e670f29
3
+ size 3310805
test/gc_temporal.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb90f295a3067b47845edf845abb352ad71c36ef23227b85028037704fc7d323
3
+ size 56154
test/ngram.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:645d1cf66c46af330976dbf8f7718f64d732b18736b12047f2580a37b29a5f27
3
+ size 193482
test/ngram_surprisal.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3825bb31577d8c1803637694effc49dc3ac8e5e1bcc823a408f84452f7187cfa
3
+ size 150879
test/plot_shifts.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb88f284f3e85501e5861013c6a3127d4ac8b952c4dd31304e6927a1ff4d7a28
3
+ size 141148
test/rst.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b599282df33a8069ef0cf334eb63c1dd203cddac1ddff4fabf3689fe385b05e2
3
+ size 362624
test/structure.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a5483f09ddc4e7505c7bd9b98be35a1e52c14d822aa1e4c27352c72565fdf99
3
+ size 171966
test/surprisal.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20bb0547f8d4d7c6b82a311a8f0a48293e0a2a0b02d1ad22e195f9a76b07491f
3
+ size 396768
train/base.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f93b51a60d0bce8cc358acc7259e1c0861e614edf18c1d4aa78f70972d5a1b2a
3
+ size 7554856
train/bert_surprisal.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a752f8483740272209943a7cc0d9eabad42c105c36c461b3d54fe61d8994b2bd
3
+ size 16607366
train/character_arcs.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe56acc17007cb4d5f2fb419fde5c6aed8b8c011099393dd3c72d47bbc819e5b
3
+ size 209724
train/emotional.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fccef3f0136ba35671e741fb5714ad7b6b433ab2e9fd507e905fa63f207816df
3
+ size 4706645
train/gc_academic.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:423c9ce9c8e7c9b671c51df2a12f694f1bc98f42c2f14e0edf040dd0fede5710
3
+ size 695925
train/gc_basic.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:813f80da16160a3f8843e9547ad41c05d844fb3d7126378aafc8a3472be93f86
3
+ size 1407250
train/gc_char_diversity.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0c4ccbc07e4f380e7115bbdf0c7724464c7e36453bb9027d8b2b32a8e623b90
3
+ size 4018619
train/gc_concreteness.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d12b74246bc3a87140434c0d98a80f215f054a434da7e2866bd2fd9285ebbf71
3
+ size 10766840
train/gc_dialogue.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6db346c4478559a4b47d5c362d92ed56d35e5bc01e32914e4fb5ad8203642be0
3
+ size 599131
train/gc_discourse.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b18d791428b707547d7d67a55adadd53c854d1b76467725631717094df1b24f1
3
+ size 1129667
train/gc_narrative.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1278e9bdf21254492d77b6ff8f2b7346a2d593c0075ca34314cbd2c9ee1df327
3
+ size 904861
train/gc_polarity.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e614ff2cd63a9530833204df1d4a41f1aa230fd39753058fb830117d5df2427
3
+ size 20712211
train/gc_pos.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:922308e653f0f2f593af60ce0aba64eb4e6b926af811e0aaf816a799b1c7ca34
3
+ size 1756826
train/gc_pronouns.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e6651b6c23589a1b831ae2893deec402de54c4fba1629e979203965ec5c79bb
3
+ size 2012004
train/gc_punctuation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab89880037a8bef40e7f0ee4ed8eb889936712d811ff34335061daf2251cb26b
3
+ size 761913
train/gc_readability.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b71cf550d0b64a8d8b30ee39ecec2b90007eb404fd15fcc3c2c708c6a69b0459
3
+ size 2055857
train/gc_syntax.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aaca7d83c0f8304d5d139c32e58c9b5298e535688aa0b4b090b2138e2b5860d1
3
+ size 32500549
train/gc_temporal.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d0371423f0c61eeffe88591f21e236d30f1e5dd90ce6784093f63846c09892c
3
+ size 625451
train/ngram.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13931d9069eefb543dbc7733d823765f172d159d8c088078a10aa6de94c0a3a7
3
+ size 2087735
train/ngram_surprisal.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e0117f6cbf02ab7a470c96f1bed6dfdd47b16ae13bae957dd166235911d36f18
3
+ size 1259649
train/plot_shifts.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d727dd94bbaf5cd3ddd5ed31cfcefdd0b4eefe14f09c547cae73f256fd8819b7
3
+ size 1642409
train/rst.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31dd6700e835540533f0d559a41f8eca7dfcb8310eacf5668413470858a16e53
3
+ size 4097854