Commit: Update train_setfit.py
Changed file: train_setfit.py (+17 −10 lines)
|
@@ -1,31 +1,38 @@
|
|
|
|
|
| 1 |
from datasets import load_dataset
|
| 2 |
from setfit import SetFitModel, SetFitTrainer
|
|
|
|
| 3 |
|
| 4 |
LABELS = ["pre-1900","1900–1945","1946–1990","1991–2008","2009–2015","2016–2018","2019–2022","2023–present"]
|
| 5 |
name2id = {n:i for i,n in enumerate(LABELS)}
|
| 6 |
|
| 7 |
ds = load_dataset("json", data_files={"train":"train.jsonl","val":"val.jsonl"})
|
| 8 |
-
|
|
|
|
|
|
|
| 9 |
|
| 10 |
-
|
| 11 |
-
"sentence-transformers/paraphrase-mpnet-base-v2",
|
| 12 |
-
num_labels=len(LABELS)
|
| 13 |
-
)
|
| 14 |
|
|
|
|
| 15 |
trainer = SetFitTrainer(
|
| 16 |
model=model,
|
| 17 |
train_dataset=ds["train"],
|
| 18 |
eval_dataset=ds["val"],
|
| 19 |
metric="accuracy",
|
| 20 |
-
num_iterations=20,
|
| 21 |
-
num_epochs=2,
|
| 22 |
batch_size=16
|
| 23 |
)
|
| 24 |
-
|
| 25 |
trainer.train()
|
| 26 |
print("Eval:", trainer.evaluate())
|
| 27 |
|
| 28 |
-
#
|
| 29 |
-
repo_id = "
|
| 30 |
trainer.push_to_hub(repo_id)
|
| 31 |
print("Pushed to:", repo_id)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Train a SetFit classifier that buckets texts into time periods, then push
the model and its label map to the Hugging Face Hub."""
import json

from datasets import load_dataset
from huggingface_hub import upload_file
from setfit import SetFitModel, SetFitTrainer

# Chronological period buckets; a label's index in this list is its class id.
LABELS = ["pre-1900", "1900–1945", "1946–1990", "1991–2008", "2009–2015",
          "2016–2018", "2019–2022", "2023–present"]
name2id = {name: i for i, name in enumerate(LABELS)}

ds = load_dataset("json", data_files={"train": "train.jsonl", "val": "val.jsonl"})

# Fail fast if any class is absent from the training split. Use an explicit
# raise instead of `assert`: assertions are stripped under `python -O`, which
# would silently skip this validation.
seen = {row["label"] for row in ds["train"]}
missing = set(LABELS) - seen
if missing:
    raise ValueError(f"Train set missing labels: {missing}")

# Map string labels to integer class ids for both splits.
ds = ds.map(lambda x: {"label": name2id[x["label"]]})

model = SetFitModel.from_pretrained(
    "sentence-transformers/paraphrase-mpnet-base-v2",
    num_labels=len(LABELS),
)
trainer = SetFitTrainer(
    model=model,
    train_dataset=ds["train"],
    eval_dataset=ds["val"],
    metric="accuracy",
    num_iterations=20,   # contrastive pair-generation iterations per epoch
    num_epochs=2,
    batch_size=16,
)
trainer.train()
print("Eval:", trainer.evaluate())

# push model
repo_id = "DelaliScratchwerk/text-period-setfit"
trainer.push_to_hub(repo_id)
print("Pushed to:", repo_id)

# push labels.json alongside the model so inference code can recover id -> name.
# encoding="utf-8" + ensure_ascii=False keep the en-dash label names readable
# and make the written bytes independent of the platform's default encoding.
with open("labels.json", "w", encoding="utf-8") as f:
    json.dump(LABELS, f, ensure_ascii=False)
upload_file(path_or_fileobj="labels.json", path_in_repo="labels.json",
            repo_id=repo_id, repo_type="model")
print("Uploaded labels.json")