Update README.md
Browse files
README.md
CHANGED
|
@@ -61,32 +61,26 @@ Anda dapat menggunakan model ini dengan pustaka `transformers` dari Hugging Face
|
|
| 61 |
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
| 62 |
import torch
|
| 63 |
|
| 64 |
-
|
| 65 |
-
model_name_or_path = "New/models/finetuned_perturb_double_weighted_run_roberta_large/epoch-6" # Contoh epoch terakhir
|
| 66 |
-
# Atau jika diunggah ke Hub: "username/model_name"
|
| 67 |
|
| 68 |
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
|
| 69 |
model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path)
|
| 70 |
|
| 71 |
-
# Pindahkan model ke GPU jika tersedia
|
| 72 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 73 |
model.to(device)
|
| 74 |
|
| 75 |
premise = "Timnas Indonesia berhasil memenangkan pertandingan sepak bola."
|
| 76 |
hypothesis = "Indonesia kalah dalam laga tersebut."
|
| 77 |
|
| 78 |
-
# Tokenisasi input
|
| 79 |
inputs = tokenizer(premise, hypothesis, return_tensors="pt", truncation=True, padding=True, max_length=512)
|
| 80 |
inputs = {k: v.to(device) for k, v in inputs.items()}
|
| 81 |
|
| 82 |
-
|
| 83 |
-
model.eval() # Set model ke mode evaluasi
|
| 84 |
with torch.no_grad():
|
| 85 |
outputs = model(**inputs)
|
| 86 |
logits = outputs.logits
|
| 87 |
predictions = torch.argmax(logits, dim=-1)
|
| 88 |
|
| 89 |
-
# Interpretasi hasil (asumsi label 0 = non-entailment, label 1 = entailment)
|
| 90 |
if predictions.item() == 1:
|
| 91 |
print("Hipotesis dapat disimpulkan dari premis (Entailment).")
|
| 92 |
else:
|
|
|
|
| 61 |
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
| 62 |
import torch
|
| 63 |
|
| 64 |
+
model_name = "fabhiansan/indoBERT-Base-FactChecking-Summarization"
|
|
|
|
|
|
|
| 65 |
|
| 66 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 67 |
model = AutoModelForSequenceClassification.from_pretrained(model_name)
|
| 68 |
|
|
|
|
| 69 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 70 |
model.to(device)
|
| 71 |
|
| 72 |
premise = "Timnas Indonesia berhasil memenangkan pertandingan sepak bola."
|
| 73 |
hypothesis = "Indonesia kalah dalam laga tersebut."
|
| 74 |
|
|
|
|
| 75 |
inputs = tokenizer(premise, hypothesis, return_tensors="pt", truncation=True, padding=True, max_length=512)
|
| 76 |
inputs = {k: v.to(device) for k, v in inputs.items()}
|
| 77 |
|
| 78 |
+
model.eval()
|
|
|
|
| 79 |
with torch.no_grad():
|
| 80 |
outputs = model(**inputs)
|
| 81 |
logits = outputs.logits
|
| 82 |
predictions = torch.argmax(logits, dim=-1)
|
| 83 |
|
|
|
|
| 84 |
if predictions.item() == 1:
|
| 85 |
print("Hipotesis dapat disimpulkan dari premis (Entailment).")
|
| 86 |
else:
|