buruzaemon commited on
Commit
7bdce57
·
verified ·
1 Parent(s): 7d6785c

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -1,59 +1,33 @@
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
 
4
  *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mds filter=lfs diff=lfs merge=lfs -text
13
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
  *.model filter=lfs diff=lfs merge=lfs -text
15
  *.msgpack filter=lfs diff=lfs merge=lfs -text
16
- *.npy filter=lfs diff=lfs merge=lfs -text
17
- *.npz filter=lfs diff=lfs merge=lfs -text
18
  *.onnx filter=lfs diff=lfs merge=lfs -text
19
  *.ot filter=lfs diff=lfs merge=lfs -text
20
  *.parquet filter=lfs diff=lfs merge=lfs -text
21
  *.pb filter=lfs diff=lfs merge=lfs -text
22
- *.pickle filter=lfs diff=lfs merge=lfs -text
23
- *.pkl filter=lfs diff=lfs merge=lfs -text
24
  *.pt filter=lfs diff=lfs merge=lfs -text
25
  *.pth filter=lfs diff=lfs merge=lfs -text
26
  *.rar filter=lfs diff=lfs merge=lfs -text
27
- *.safetensors filter=lfs diff=lfs merge=lfs -text
28
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
  *.tar.* filter=lfs diff=lfs merge=lfs -text
30
- *.tar filter=lfs diff=lfs merge=lfs -text
31
  *.tflite filter=lfs diff=lfs merge=lfs -text
32
  *.tgz filter=lfs diff=lfs merge=lfs -text
33
- *.wasm filter=lfs diff=lfs merge=lfs -text
34
  *.xz filter=lfs diff=lfs merge=lfs -text
35
  *.zip filter=lfs diff=lfs merge=lfs -text
36
- *.zst filter=lfs diff=lfs merge=lfs -text
37
  *tfevents* filter=lfs diff=lfs merge=lfs -text
38
- # Audio files - uncompressed
39
- *.pcm filter=lfs diff=lfs merge=lfs -text
40
- *.sam filter=lfs diff=lfs merge=lfs -text
41
- *.raw filter=lfs diff=lfs merge=lfs -text
42
- # Audio files - compressed
43
- *.aac filter=lfs diff=lfs merge=lfs -text
44
- *.flac filter=lfs diff=lfs merge=lfs -text
45
- *.mp3 filter=lfs diff=lfs merge=lfs -text
46
- *.ogg filter=lfs diff=lfs merge=lfs -text
47
- *.wav filter=lfs diff=lfs merge=lfs -text
48
- # Image files - uncompressed
49
- *.bmp filter=lfs diff=lfs merge=lfs -text
50
- *.gif filter=lfs diff=lfs merge=lfs -text
51
- *.png filter=lfs diff=lfs merge=lfs -text
52
- *.tiff filter=lfs diff=lfs merge=lfs -text
53
- # Image files - compressed
54
- *.jpg filter=lfs diff=lfs merge=lfs -text
55
- *.jpeg filter=lfs diff=lfs merge=lfs -text
56
- *.webp filter=lfs diff=lfs merge=lfs -text
57
- # Video files - compressed
58
- *.mp4 filter=lfs diff=lfs merge=lfs -text
59
- *.webm filter=lfs diff=lfs merge=lfs -text
 
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
  *.bz2 filter=lfs diff=lfs merge=lfs -text
 
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
 
 
 
11
  *.model filter=lfs diff=lfs merge=lfs -text
12
  *.msgpack filter=lfs diff=lfs merge=lfs -text
 
 
13
  *.onnx filter=lfs diff=lfs merge=lfs -text
14
  *.ot filter=lfs diff=lfs merge=lfs -text
15
  *.parquet filter=lfs diff=lfs merge=lfs -text
16
  *.pb filter=lfs diff=lfs merge=lfs -text
 
 
17
  *.pt filter=lfs diff=lfs merge=lfs -text
18
  *.pth filter=lfs diff=lfs merge=lfs -text
19
  *.rar filter=lfs diff=lfs merge=lfs -text
 
20
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
  *.tar.* filter=lfs diff=lfs merge=lfs -text
 
22
  *.tflite filter=lfs diff=lfs merge=lfs -text
23
  *.tgz filter=lfs diff=lfs merge=lfs -text
 
24
  *.xz filter=lfs diff=lfs merge=lfs -text
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
28
+ de/train.jsonl filter=lfs diff=lfs merge=lfs -text
29
+ en/train.jsonl filter=lfs diff=lfs merge=lfs -text
30
+ es/train.jsonl filter=lfs diff=lfs merge=lfs -text
31
+ fr/train.jsonl filter=lfs diff=lfs merge=lfs -text
32
+ ja/train.jsonl filter=lfs diff=lfs merge=lfs -text
33
+ zh/train.jsonl filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md ADDED
@@ -0,0 +1,449 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - found
4
+ language_creators:
5
+ - found
6
+ language:
7
+ - de
8
+ - en
9
+ - es
10
+ - fr
11
+ - ja
12
+ - zh
13
+ license:
14
+ - other
15
+ multilinguality:
16
+ - monolingual
17
+ - multilingual
18
+ size_categories:
19
+ - 100K<n<1M
20
+ - 1M<n<10M
21
+ source_datasets:
22
+ - original
23
+ task_categories:
24
+ - summarization
25
+ - text-generation
26
+ - fill-mask
27
+ - text-classification
28
+ task_ids:
29
+ - text-scoring
30
+ - language-modeling
31
+ - masked-language-modeling
32
+ - sentiment-classification
33
+ - sentiment-scoring
34
+ - topic-classification
35
+ paperswithcode_id: null
36
+ pretty_name: The Multilingual Amazon Reviews Corpus
37
+ dataset_info:
38
+ - config_name: all_languages
39
+ features:
40
+ - name: review_id
41
+ dtype: string
42
+ - name: product_id
43
+ dtype: string
44
+ - name: reviewer_id
45
+ dtype: string
46
+ - name: stars
47
+ dtype: int32
48
+ - name: review_body
49
+ dtype: string
50
+ - name: review_title
51
+ dtype: string
52
+ - name: language
53
+ dtype: string
54
+ - name: product_category
55
+ dtype: string
56
+ splits:
57
+ - name: train
58
+ num_bytes: 364405048
59
+ num_examples: 1200000
60
+ - name: validation
61
+ num_bytes: 9047533
62
+ num_examples: 30000
63
+ - name: test
64
+ num_bytes: 9099141
65
+ num_examples: 30000
66
+ download_size: 640320386
67
+ dataset_size: 382551722
68
+ - config_name: de
69
+ features:
70
+ - name: review_id
71
+ dtype: string
72
+ - name: product_id
73
+ dtype: string
74
+ - name: reviewer_id
75
+ dtype: string
76
+ - name: stars
77
+ dtype: int32
78
+ - name: review_body
79
+ dtype: string
80
+ - name: review_title
81
+ dtype: string
82
+ - name: language
83
+ dtype: string
84
+ - name: product_category
85
+ dtype: string
86
+ splits:
87
+ - name: train
88
+ num_bytes: 64485678
89
+ num_examples: 200000
90
+ - name: validation
91
+ num_bytes: 1605727
92
+ num_examples: 5000
93
+ - name: test
94
+ num_bytes: 1611044
95
+ num_examples: 5000
96
+ download_size: 94802490
97
+ dataset_size: 67702449
98
+ - config_name: en
99
+ features:
100
+ - name: review_id
101
+ dtype: string
102
+ - name: product_id
103
+ dtype: string
104
+ - name: reviewer_id
105
+ dtype: string
106
+ - name: stars
107
+ dtype: int32
108
+ - name: review_body
109
+ dtype: string
110
+ - name: review_title
111
+ dtype: string
112
+ - name: language
113
+ dtype: string
114
+ - name: product_category
115
+ dtype: string
116
+ splits:
117
+ - name: train
118
+ num_bytes: 58601089
119
+ num_examples: 200000
120
+ - name: validation
121
+ num_bytes: 1474672
122
+ num_examples: 5000
123
+ - name: test
124
+ num_bytes: 1460565
125
+ num_examples: 5000
126
+ download_size: 86094112
127
+ dataset_size: 61536326
128
+ - config_name: es
129
+ features:
130
+ - name: review_id
131
+ dtype: string
132
+ - name: product_id
133
+ dtype: string
134
+ - name: reviewer_id
135
+ dtype: string
136
+ - name: stars
137
+ dtype: int32
138
+ - name: review_body
139
+ dtype: string
140
+ - name: review_title
141
+ dtype: string
142
+ - name: language
143
+ dtype: string
144
+ - name: product_category
145
+ dtype: string
146
+ splits:
147
+ - name: train
148
+ num_bytes: 52375658
149
+ num_examples: 200000
150
+ - name: validation
151
+ num_bytes: 1303958
152
+ num_examples: 5000
153
+ - name: test
154
+ num_bytes: 1312347
155
+ num_examples: 5000
156
+ download_size: 81345461
157
+ dataset_size: 54991963
158
+ - config_name: fr
159
+ features:
160
+ - name: review_id
161
+ dtype: string
162
+ - name: product_id
163
+ dtype: string
164
+ - name: reviewer_id
165
+ dtype: string
166
+ - name: stars
167
+ dtype: int32
168
+ - name: review_body
169
+ dtype: string
170
+ - name: review_title
171
+ dtype: string
172
+ - name: language
173
+ dtype: string
174
+ - name: product_category
175
+ dtype: string
176
+ splits:
177
+ - name: train
178
+ num_bytes: 54593565
179
+ num_examples: 200000
180
+ - name: validation
181
+ num_bytes: 1340763
182
+ num_examples: 5000
183
+ - name: test
184
+ num_bytes: 1364510
185
+ num_examples: 5000
186
+ download_size: 85917293
187
+ dataset_size: 57298838
188
+ - config_name: ja
189
+ features:
190
+ - name: review_id
191
+ dtype: string
192
+ - name: product_id
193
+ dtype: string
194
+ - name: reviewer_id
195
+ dtype: string
196
+ - name: stars
197
+ dtype: int32
198
+ - name: review_body
199
+ dtype: string
200
+ - name: review_title
201
+ dtype: string
202
+ - name: language
203
+ dtype: string
204
+ - name: product_category
205
+ dtype: string
206
+ splits:
207
+ - name: train
208
+ num_bytes: 82401390
209
+ num_examples: 200000
210
+ - name: validation
211
+ num_bytes: 2035391
212
+ num_examples: 5000
213
+ - name: test
214
+ num_bytes: 2048048
215
+ num_examples: 5000
216
+ download_size: 177773783
217
+ dataset_size: 86484829
218
+ - config_name: zh
219
+ features:
220
+ - name: review_id
221
+ dtype: string
222
+ - name: product_id
223
+ dtype: string
224
+ - name: reviewer_id
225
+ dtype: string
226
+ - name: stars
227
+ dtype: int32
228
+ - name: review_body
229
+ dtype: string
230
+ - name: review_title
231
+ dtype: string
232
+ - name: language
233
+ dtype: string
234
+ - name: product_category
235
+ dtype: string
236
+ splits:
237
+ - name: train
238
+ num_bytes: 51947668
239
+ num_examples: 200000
240
+ - name: validation
241
+ num_bytes: 1287106
242
+ num_examples: 5000
243
+ - name: test
244
+ num_bytes: 1302711
245
+ num_examples: 5000
246
+ download_size: 114387247
247
+ dataset_size: 54537485
248
+ config_names:
249
+ - all_languages
250
+ - de
251
+ - en
252
+ - es
253
+ - fr
254
+ - ja
255
+ - zh
256
+ viewer: false
257
+ ---
258
+
259
+ # Dataset Card for The Multilingual Amazon Reviews Corpus
260
+
261
+ ## Table of Contents
262
+ - [Dataset Card for amazon_reviews_multi](#dataset-card-for-amazon_reviews_multi)
263
+ - [Table of Contents](#table-of-contents)
264
+ - [Dataset Description](#dataset-description)
265
+ - [Dataset Summary](#dataset-summary)
266
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
267
+ - [Languages](#languages)
268
+ - [Dataset Structure](#dataset-structure)
269
+ - [Data Instances](#data-instances)
270
+ - [plain_text](#plain_text)
271
+ - [Data Fields](#data-fields)
272
+ - [plain_text](#plain_text-1)
273
+ - [Data Splits](#data-splits)
274
+ - [Dataset Creation](#dataset-creation)
275
+ - [Curation Rationale](#curation-rationale)
276
+ - [Source Data](#source-data)
277
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
278
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
279
+ - [Annotations](#annotations)
280
+ - [Annotation process](#annotation-process)
281
+ - [Who are the annotators?](#who-are-the-annotators)
282
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
283
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
284
+ - [Social Impact of Dataset](#social-impact-of-dataset)
285
+ - [Discussion of Biases](#discussion-of-biases)
286
+ - [Other Known Limitations](#other-known-limitations)
287
+ - [Additional Information](#additional-information)
288
+ - [Dataset Curators](#dataset-curators)
289
+ - [Licensing Information](#licensing-information)
290
+ - [Citation Information](#citation-information)
291
+ - [Contributions](#contributions)
292
+
293
+ ## Dataset Description
294
+
295
+ - **Webpage:** https://registry.opendata.aws/amazon-reviews-ml/
296
+ - **Paper:** https://arxiv.org/abs/2010.02573
297
+ - **Point of Contact:** [multilingual-reviews-dataset@amazon.com](mailto:multilingual-reviews-dataset@amazon.com)
298
+
299
+ ### Dataset Summary
300
+
301
+ <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400">
302
+ <p><b>Defunct:</b> Dataset "amazon_reviews_multi" is defunct and no longer accessible due to the decision of data providers.</p>
303
+ </div>
304
+
305
+ We provide an Amazon product reviews dataset for multilingual text classification. The dataset contains reviews in English, Japanese, German, French, Chinese and Spanish, collected between November 1, 2015 and November 1, 2019. Each record in the dataset contains the review text, the review title, the star rating, an anonymized reviewer ID, an anonymized product ID and the coarse-grained product category (e.g. ‘books’, ‘appliances’, etc.) The corpus is balanced across stars, so each star rating constitutes 20% of the reviews in each language.
306
+
307
+ For each language, there are 200,000, 5,000 and 5,000 reviews in the training, development and test sets respectively. The maximum number of reviews per reviewer is 20 and the maximum number of reviews per product is 20. All reviews are truncated after 2,000 characters, and all reviews are at least 20 characters long.
308
+
309
+ Note that the language of a review does not necessarily match the language of its marketplace (e.g. reviews from amazon.de are primarily written in German, but could also be written in English, etc.). For this reason, we applied a language detection algorithm based on the work in Bojanowski et al. (2017) to determine the language of the review text and we removed reviews that were not written in the expected language.
310
+
311
+ ### Supported Tasks and Leaderboards
312
+
313
+ [More Information Needed]
314
+
315
+ ### Languages
316
+
317
+ The dataset contains reviews in English, Japanese, German, French, Chinese and Spanish.
318
+
319
+ ## Dataset Structure
320
+
321
+ ### Data Instances
322
+
323
+ Each data instance corresponds to a review. The original JSON for an instance looks like so (German example):
324
+
325
+ ```json
326
+ {
327
+ "review_id": "de_0784695",
328
+ "product_id": "product_de_0572654",
329
+ "reviewer_id": "reviewer_de_0645436",
330
+ "stars": "1",
331
+ "review_body": "Leider, leider nach einmal waschen ausgeblichen . Es sieht super h\u00fcbsch aus , nur leider stinkt es ganz schrecklich und ein Waschgang in der Maschine ist notwendig ! Nach einem mal waschen sah es aus als w\u00e4re es 10 Jahre alt und hatte 1000 e von Waschg\u00e4ngen hinter sich :( echt schade !",
332
+ "review_title": "Leider nicht zu empfehlen",
333
+ "language": "de",
334
+ "product_category": "home"
335
+ }
336
+ ```
337
+
338
+ ### Data Fields
339
+
340
+ - `review_id`: A string identifier of the review.
341
+ - `product_id`: A string identifier of the product being reviewed.
342
+ - `reviewer_id`: A string identifier of the reviewer.
343
+ - `stars`: An int between 1-5 indicating the number of stars.
344
+ - `review_body`: The text body of the review.
345
+ - `review_title`: The text title of the review.
346
+ - `language`: The string identifier of the review language.
347
+ - `product_category`: String representation of the product's category.
348
+
349
+ ### Data Splits
350
+
351
+ Each language configuration comes with its own `train`, `validation`, and `test` splits. The `all_languages` split
352
+ is simply a concatenation of the corresponding split across all languages. That is, the `train` split for
353
+ `all_languages` is a concatenation of the `train` splits for each of the languages and likewise for `validation` and
354
+ `test`.
355
+
356
+ ## Dataset Creation
357
+
358
+ ### Curation Rationale
359
+
360
+ The dataset is motivated by the desire to advance sentiment analysis and text classification in other (non-English)
361
+ languages.
362
+
363
+ ### Source Data
364
+
365
+ #### Initial Data Collection and Normalization
366
+
367
+ The authors gathered the reviews from the marketplaces in the US, Japan, Germany, France, Spain, and China for the
368
+ English, Japanese, German, French, Spanish, and Chinese languages, respectively. They then ensured the correct
369
+ language by applying a language detection algorithm, only retaining those of the target language. In a random sample
370
+ of the resulting reviews, the authors observed a small percentage of target languages that were incorrectly filtered
371
+ out and a very few mismatched languages that were incorrectly retained.
372
+
373
+ #### Who are the source language producers?
374
+
375
+ The original text comes from Amazon customers reviewing products on the marketplace across a variety of product
376
+ categories.
377
+
378
+ ### Annotations
379
+
380
+ #### Annotation process
381
+
382
+ Each of the fields included are submitted by the user with the review or otherwise associated with the review. No
383
+ manual or machine-driven annotation was necessary.
384
+
385
+ #### Who are the annotators?
386
+
387
+ N/A
388
+
389
+ ### Personal and Sensitive Information
390
+
391
+ According to the original dataset [license terms](https://docs.opendata.aws/amazon-reviews-ml/license.txt), you may not:
392
+ - link or associate content in the Reviews Corpus with any personal information (including Amazon customer accounts), or
393
+ - attempt to determine the identity of the author of any content in the Reviews Corpus.
394
+
395
+ If you violate any of the foregoing conditions, your license to access and use the Reviews Corpus will automatically
396
+ terminate without prejudice to any of the other rights or remedies Amazon may have.
397
+
398
+ ## Considerations for Using the Data
399
+
400
+ ### Social Impact of Dataset
401
+
402
+ This dataset is part of an effort to encourage text classification research in languages other than English. Such
403
+ work increases the accessibility of natural language technology to more regions and cultures. Unfortunately, each of
404
+ the languages included here is relatively high resource and well studied.
405
+
406
+ ### Discussion of Biases
407
+
408
+ The dataset contains only reviews from verified purchases (as described in the paper, section 2.1), and the reviews
409
+ should conform to the [Amazon Community Guidelines](https://www.amazon.com/gp/help/customer/display.html?nodeId=GLHXEX85MENUE4XF).
410
+
411
+ ### Other Known Limitations
412
+
413
+ The dataset is constructed so that the distribution of star ratings is balanced. This feature has some advantages for
414
+ purposes of classification, but some types of language may be over or underrepresented relative to the original
415
+ distribution of reviews to achieve this balance.
416
+
417
+ ## Additional Information
418
+
419
+ ### Dataset Curators
420
+
421
+ Published by Phillip Keung, Yichao Lu, György Szarvas, and Noah A. Smith. Managed by Amazon.
422
+
423
+ ### Licensing Information
424
+
425
+ Amazon has licensed this dataset under its own agreement for non-commercial research usage only. This license is quite restrictive, preventing use in any context where a fee is received, including paid internships. A copy of the agreement can be found on the dataset webpage here:
426
+ https://docs.opendata.aws/amazon-reviews-ml/license.txt
427
+
428
+ By accessing the Multilingual Amazon Reviews Corpus ("Reviews Corpus"), you agree that the Reviews Corpus is an Amazon Service subject to the [Amazon.com Conditions of Use](https://www.amazon.com/gp/help/customer/display.html/ref=footer_cou?ie=UTF8&nodeId=508088) and you agree to be bound by them, with the following additional conditions:
429
+
430
+ In addition to the license rights granted under the Conditions of Use, Amazon or its content providers grant you a limited, non-exclusive, non-transferable, non-sublicensable, revocable license to access and use the Reviews Corpus for purposes of academic research. You may not resell, republish, or make any commercial use of the Reviews Corpus or its contents, including use of the Reviews Corpus for commercial research, such as research related to a funding or consultancy contract, internship, or other relationship in which the results are provided for a fee or delivered to a for-profit organization. You may not (a) link or associate content in the Reviews Corpus with any personal information (including Amazon customer accounts), or (b) attempt to determine the identity of the author of any content in the Reviews Corpus. If you violate any of the foregoing conditions, your license to access and use the Reviews Corpus will automatically terminate without prejudice to any of the other rights or remedies Amazon may have.
431
+
432
+ ### Citation Information
433
+
434
+ Please cite the following paper (arXiv) if you found this dataset useful:
435
+
436
+ Phillip Keung, Yichao Lu, György Szarvas and Noah A. Smith. “The Multilingual Amazon Reviews Corpus.” In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, 2020.
437
+
438
+ ```
439
+ @inproceedings{marc_reviews,
440
+ title={The Multilingual Amazon Reviews Corpus},
441
+ author={Keung, Phillip and Lu, Yichao and Szarvas, György and Smith, Noah A.},
442
+ booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing},
443
+ year={2020}
444
+ }
445
+ ```
446
+
447
+ ### Contributions
448
+
449
+ Thanks to [@joeddav](https://github.com/joeddav) for adding this dataset.
amazon_reviews_multi.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """The Multilingual Amazon Reviews Corpus"""
17
+
18
+
19
+ import json
20
+
21
+ import datasets
22
+ from datasets.exceptions import DefunctDatasetError
23
+
24
+
25
# BibTeX entry surfaced through DatasetInfo.citation in the generated dataset card.
_CITATION = """\
@inproceedings{marc_reviews,
title={The Multilingual Amazon Reviews Corpus},
author={Keung, Phillip and Lu, Yichao and Szarvas, György and Smith, Noah A.},
booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing},
year={2020}
}
"""

# Verbatim Amazon license terms; attached to DatasetInfo.license. Do not edit.
_LICENSE = """\
By accessing the Multilingual Amazon Reviews Corpus ("Reviews Corpus"), you agree that the Reviews Corpus is an Amazon Service subject to the Amazon.com Conditions of Use (https://www.amazon.com/gp/help/customer/display.html/ref=footer_cou?ie=UTF8&nodeId=508088) and you agree to be bound by them, with the following additional conditions:

In addition to the license rights granted under the Conditions of Use, Amazon or its content providers grant you a limited, non-exclusive, non-transferable, non-sublicensable, revocable license to access and use the Reviews Corpus for purposes of academic research. You may not resell, republish, or make any commercial use of the Reviews Corpus or its contents, including use of the Reviews Corpus for commercial research, such as research related to a funding or consultancy contract, internship, or other relationship in which the results are provided for a fee or delivered to a for-profit organization. You may not (a) link or associate content in the Reviews Corpus with any personal information (including Amazon customer accounts), or (b) attempt to determine the identity of the author of any content in the Reviews Corpus. If you violate any of the foregoing conditions, your license to access and use the Reviews Corpus will automatically terminate without prejudice to any of the other rights or remedies Amazon may have.
"""

# Long-form dataset summary; attached to DatasetInfo.description.
_DESCRIPTION = """\
We provide an Amazon product reviews dataset for multilingual text classification. The dataset contains reviews in English, Japanese, German, French, Chinese and Spanish, collected between November 1, 2015 and November 1, 2019. Each record in the dataset contains the review text, the review title, the star rating, an anonymized reviewer ID, an anonymized product ID and the coarse-grained product category (e.g. ‘books’, ‘appliances’, etc.) The corpus is balanced across stars, so each star rating constitutes 20% of the reviews in each language.

For each language, there are 200,000, 5,000 and 5,000 reviews in the training, development and test sets respectively. The maximum number of reviews per reviewer is 20 and the maximum number of reviews per product is 20. All reviews are truncated after 2,000 characters, and all reviews are at least 20 characters long.

Note that the language of a review does not necessarily match the language of its marketplace (e.g. reviews from amazon.de are primarily written in German, but could also be written in English, etc.). For this reason, we applied a language detection algorithm based on the work in Bojanowski et al. (2017) to determine the language of the review text and we removed reviews that were not written in the expected language.
"""

# ISO 639-1 language code -> human-readable name; each key doubles as the
# name of a per-language builder configuration.
_LANGUAGES = {
    "de": "German",
    "en": "English",
    "es": "Spanish",
    "fr": "French",
    "ja": "Japanese",
    "zh": "Chinese",
}
# Name of the configuration that concatenates all six languages.
_ALL_LANGUAGES = "all_languages"
_VERSION = "1.0.0"
_HOMEPAGE_URL = "https://registry.opendata.aws/amazon-reviews-ml/"
# URL template; {split} is one of "train"/"dev"/"test" and {lang} a _LANGUAGES key.
# NOTE(review): upstream marks this dataset defunct — these S3 URLs may no
# longer resolve; verify before relying on remote download.
_DOWNLOAD_URL = "https://amazon-reviews-ml.s3-us-west-2.amazonaws.com/json/{split}/dataset_{lang}_{split}.json"
60
+
61
+
62
class AmazonReviewsMultiConfig(datasets.BuilderConfig):
    """BuilderConfig for AmazonReviewsMulti.

    Args:
        languages: Language selection for this configuration — either a list of
            language codes or the full ``_LANGUAGES`` mapping (iterating either
            yields language codes). ``None`` if not specified.
        **kwargs: Forwarded to ``datasets.BuilderConfig`` (e.g. ``name``,
            ``description``). The dataset version is pinned to ``_VERSION``.
    """

    def __init__(self, languages=None, **kwargs):
        # Fix: the original statement ended with a stray trailing comma, which
        # wrapped the super().__init__ call in a throwaway one-element tuple.
        # Also modernized the legacy two-argument super() form.
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.languages = languages
68
+
69
+
70
class AmazonReviewsMulti(datasets.GeneratorBasedBuilder):
    """The Multilingual Amazon Reviews Corpus"""

    # One config spanning all languages, plus one config per language code.
    BUILDER_CONFIGS = [
        AmazonReviewsMultiConfig(
            name=_ALL_LANGUAGES,
            languages=_LANGUAGES,
            description="A collection of Amazon reviews specifically designed to aid research in multilingual text classification.",
        ),
        *(
            AmazonReviewsMultiConfig(
                name=code,
                languages=[code],
                description=f"{_LANGUAGES[code]} examples from a collection of Amazon reviews specifically designed to aid research in multilingual text classification",
            )
            for code in _LANGUAGES
        ),
    ]
    BUILDER_CONFIG_CLASS = AmazonReviewsMultiConfig
    DEFAULT_CONFIG_NAME = _ALL_LANGUAGES

    def _info(self):
        """Build the DatasetInfo: feature schema, license, citation, homepage."""
        # NOTE(review): upstream declared this dataset defunct; the raise below
        # is deliberately disabled so the mirrored data files stay loadable.
        # raise DefunctDatasetError(
        #     "Dataset 'amazon_reviews_multi' is defunct and no longer accessible due to the decision of data providers"
        # )
        feature_spec = {
            "review_id": datasets.Value("string"),
            "product_id": datasets.Value("string"),
            "reviewer_id": datasets.Value("string"),
            "stars": datasets.Value("int32"),
            "review_body": datasets.Value("string"),
            "review_title": datasets.Value("string"),
            "language": datasets.Value("string"),
            "product_category": datasets.Value("string"),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feature_spec),
            supervised_keys=None,
            license=_LICENSE,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download one JSON-lines file per configured language for each split.

        The remote files name the validation split "dev"; it is mapped to
        ``datasets.Split.VALIDATION`` here.
        """
        split_plan = (
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        )
        generators = []
        for split_name, remote_tag in split_plan:
            urls = [_DOWNLOAD_URL.format(split=remote_tag, lang=lang) for lang in self.config.languages]
            paths = dl_manager.download_and_extract(urls)
            generators.append(
                datasets.SplitGenerator(name=split_name, gen_kwargs={"file_paths": paths})
            )
        return generators

    def _generate_examples(self, file_paths):
        """Yield (index, example) pairs; the index runs across all files so
        keys stay unique when multiple languages are concatenated."""
        example_index = 0
        for path in file_paths:
            with open(path, "r", encoding="utf-8") as handle:
                for raw_line in handle:
                    yield example_index, json.loads(raw_line)
                    example_index += 1
de/test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
de/train.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86a341509433f950e53a9924295920570dade6800549cd1c892cd40396e15fbb
3
+ size 86905126
de/validation.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
en/test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
en/train.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b52a8ce0b27b92cff65d90b45e909f85c278e8f6b2c8cc27625d4ea295340b27
3
+ size 78598589
en/validation.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
es/test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
es/train.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ec66fded5c2efc50b676d064a4e738c807c272adbee1b18c8fe0d070fa3c9db
3
+ size 74078583
es/validation.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
fr/test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
fr/train.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5d494f55f298d69dd078f9bbacff7bf9f88d7e49d742a424de304b305462e5f
3
+ size 78459816
fr/validation.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
ja/test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
ja/train.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d75556618516e0567a3a58507e0b9b451036d4b8a183b61bbe4ed7afb95ec26
3
+ size 165981099
ja/validation.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
zh/test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
zh/train.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ed5a23e1811357db3c1a2776c679e28fce301067c197aee82f0b2655f321ca1
3
+ size 105555515
zh/validation.jsonl ADDED
The diff for this file is too large to render. See raw diff