|
|
--- |
|
|
language: |
|
|
- th |
|
|
dataset_info: |
|
|
features: |
|
|
- name: text |
|
|
dtype: string |
|
|
- name: ข้อความ |
|
|
dtype: string |
|
|
- name: label |
|
|
dtype: |
|
|
class_label: |
|
|
names: |
|
|
'0': neg |
|
|
'1': pos |
|
|
--- |
|
|
### รีวิว sentiment IMDB ภาษาไทย
|
|
ตั้งต้นจาก https://huggingface.co/datasets/stanfordnlp/imdb |
|
|
|
|
|
### label |
|
|
* 0 neg |
|
|
* 1 pos |
|
|
|
|
|
### ไฟล์ข้อมูล

* train.csv
|
|
* test.csv |
|
|
|
|
|
### ตัวอย่างการใช้งาน |
|
|
```python
|
|
from datasets import load_dataset |
|
|
|
|
|
# Specify the data files |
|
|
data_files = { |
|
|
"test": "test.csv", |
|
|
"train": "train.csv" |
|
|
} |
|
|
dataset = load_dataset("uisp/imdb_th", data_files=data_files)  # NOTE: ตรวจสอบชื่อ repo ให้ตรงกับ dataset นี้ (เดิมเขียนเป็น ag_news_th ซึ่งไม่ตรงกับเนื้อหา)
|
|
|
|
|
print("Keys in loaded dataset:", dataset.keys()) # Should show keys for splits, like {'test', 'train'} |
|
|
|
|
|
# Convert a split to pandas for further processing |
|
|
test = dataset['test'].to_pandas() |
|
|
print(test.head()) |
|
|
print(len(test.index)) |
|
|
|
|
|
train = dataset['train'].to_pandas() |
|
|
train.dropna(inplace=True) # เอาตัวที่ยังไม่ได้แปล (column ข้อความ ว่างเปล่า) ออกไป |
|
|
print(train.head()) |
|
|
print(len(train.index))
```
|
|
|