Datasets:
Matthew Franglen committed on
Commit ·
22c9e6d
1
Parent(s): cdea156
Add make target to generate all the laptop 2014 data
Browse files
- Makefile +42 -0
- src/alignment.py +1 -1
- src/main.py +2 -0
Makefile
CHANGED
|
@@ -14,6 +14,48 @@ hub/login : .make/requirements
|
|
| 14 |
hub/create : .make/requirements
|
| 15 |
poetry run huggingface-cli repo create aste-v2 --type dataset
|
| 16 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
|
| 18 |
## Install Python Dependencies
|
| 19 |
requirements : .make/requirements
|
|
|
|
| 14 |
hub/create : .make/requirements
|
| 15 |
poetry run huggingface-cli repo create aste-v2 --type dataset
|
| 16 |
|
| 17 |
+
data : \
|
| 18 |
+
data/2014/laptop/aste/test.gz.parquet \
|
| 19 |
+
data/2014/laptop/aste/valid.gz.parquet \
|
| 20 |
+
data/2014/laptop/sem-eval/train.gz.parquet \
|
| 21 |
+
data/2014/laptop/aste/train.gz.parquet \
|
| 22 |
+
data/2014/laptop/sem-eval/test.gz.parquet \
|
| 23 |
+
data/2014/laptop/sem-eval/valid.gz.parquet
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
data/2014/laptop/aste/train.gz.parquet : .make/requirements
|
| 27 |
+
poetry run python -m src.main aste \
|
| 28 |
+
--aste-file data/raw/aspect-sentiment-triplet-extraction/laptop-2014/train_triplets.txt \
|
| 29 |
+
--output-file $@
|
| 30 |
+
|
| 31 |
+
data/2014/laptop/aste/valid.gz.parquet : .make/requirements
|
| 32 |
+
poetry run python -m src.main aste \
|
| 33 |
+
--aste-file data/raw/aspect-sentiment-triplet-extraction/laptop-2014/dev_triplets.txt \
|
| 34 |
+
--output-file $@
|
| 35 |
+
|
| 36 |
+
data/2014/laptop/aste/test.gz.parquet : .make/requirements
|
| 37 |
+
poetry run python -m src.main aste \
|
| 38 |
+
--aste-file data/raw/aspect-sentiment-triplet-extraction/laptop-2014/test_triplets.txt \
|
| 39 |
+
--output-file $@
|
| 40 |
+
|
| 41 |
+
data/2014/laptop/sem-eval/train.gz.parquet : .make/requirements
|
| 42 |
+
poetry run python -m src.main sem-eval \
|
| 43 |
+
--aste-file data/raw/aspect-sentiment-triplet-extraction/laptop-2014/train_triplets.txt \
|
| 44 |
+
--sem-eval-file data/raw/sem-eval/2014/Laptop_Train_v2.xml \
|
| 45 |
+
--output-file $@
|
| 46 |
+
|
| 47 |
+
data/2014/laptop/sem-eval/valid.gz.parquet : .make/requirements
|
| 48 |
+
poetry run python -m src.main sem-eval \
|
| 49 |
+
--aste-file data/raw/aspect-sentiment-triplet-extraction/laptop-2014/dev_triplets.txt \
|
| 50 |
+
--sem-eval-file data/raw/sem-eval/2014/Laptop_Train_v2.xml \
|
| 51 |
+
--output-file $@
|
| 52 |
+
|
| 53 |
+
data/2014/laptop/sem-eval/test.gz.parquet : .make/requirements
|
| 54 |
+
poetry run python -m src.main sem-eval \
|
| 55 |
+
--aste-file data/raw/aspect-sentiment-triplet-extraction/laptop-2014/test_triplets.txt \
|
| 56 |
+
--sem-eval-file data/raw/sem-eval/2014/Laptops_Test_Data_PhaseA.xml \
|
| 57 |
+
--output-file $@
|
| 58 |
+
|
| 59 |
|
| 60 |
## Install Python Dependencies
|
| 61 |
requirements : .make/requirements
|
src/alignment.py
CHANGED
|
@@ -18,7 +18,7 @@ def find_closest_text(
|
|
| 18 |
result = original_text.map(no_space_replacements)
|
| 19 |
non_perfect_matches = result.isna().sum()
|
| 20 |
|
| 21 |
-
assert non_perfect_matches / len(original) <= 0.
|
| 22 |
"Poor alignment with replacement text. "
|
| 23 |
f"{non_perfect_matches:,} of {len(original):,} rows did not match well"
|
| 24 |
)
|
|
|
|
| 18 |
result = original_text.map(no_space_replacements)
|
| 19 |
non_perfect_matches = result.isna().sum()
|
| 20 |
|
| 21 |
+
assert non_perfect_matches / len(original) <= 0.20, (
|
| 22 |
"Poor alignment with replacement text. "
|
| 23 |
f"{non_perfect_matches:,} of {len(original):,} rows did not match well"
|
| 24 |
)
|
src/main.py
CHANGED
|
@@ -32,6 +32,7 @@ def aste(
|
|
| 32 |
|
| 33 |
print(df.sample(3))
|
| 34 |
|
|
|
|
| 35 |
df.to_parquet(output_file, compression="gzip")
|
| 36 |
|
| 37 |
|
|
@@ -58,6 +59,7 @@ def sem_eval(
|
|
| 58 |
|
| 59 |
print(df.sample(3))
|
| 60 |
|
|
|
|
| 61 |
df.to_parquet(output_file, compression="gzip")
|
| 62 |
|
| 63 |
|
|
|
|
| 32 |
|
| 33 |
print(df.sample(3))
|
| 34 |
|
| 35 |
+
output_file.parent.mkdir(exist_ok=True, parents=True)
|
| 36 |
df.to_parquet(output_file, compression="gzip")
|
| 37 |
|
| 38 |
|
|
|
|
| 59 |
|
| 60 |
print(df.sample(3))
|
| 61 |
|
| 62 |
+
output_file.parent.mkdir(exist_ok=True, parents=True)
|
| 63 |
df.to_parquet(output_file, compression="gzip")
|
| 64 |
|
| 65 |
|