Update README.md
Browse files
README.md
CHANGED
|
@@ -77,6 +77,7 @@ tokenizer = AutoTokenizer.from_pretrained(repo_id)
|
|
| 77 |
# Initialize with same config as training
|
| 78 |
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
|
| 79 |
```
|
|
|
|
| 80 |
### 2. Bayesian inference
|
| 81 |
To obtain probabilistic outputs and uncertainty metrics, use the `mc_forward` method:
|
| 82 |
```python
|
|
@@ -86,14 +87,14 @@ N_SAMPLES = 50
|
|
| 86 |
inputs = tokenizer("I am so happy you are here!", return_tensors="pt")
|
| 87 |
|
| 88 |
model.eval()
|
| 89 |
-
with torch.
|
| 90 |
-
|
| 91 |
|
| 92 |
# Bayesian Post-processing
|
| 93 |
-
|
| 94 |
|
| 95 |
-
mean_probs =
|
| 96 |
-
uncertainty =
|
| 97 |
|
| 98 |
|
| 99 |
# Formatted Output
|
|
|
|
| 77 |
# Initialize with same config as training
|
| 78 |
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
|
| 79 |
```
|
| 80 |
+
|
| 81 |
### 2. Bayesian inference
|
| 82 |
To obtain probabilistic outputs and uncertainty metrics, use the `mc_forward` method:
|
| 83 |
```python
|
|
|
|
| 87 |
inputs = tokenizer("I am so happy you are here!", return_tensors="pt")
|
| 88 |
|
| 89 |
model.eval()
|
| 90 |
+
with torch.inference_mode():
|
| 91 |
+
mc_logits = model.mc_forward(inputs['input_ids'], inputs['attention_mask'], n_samples=N_SAMPLES) # Automatically keeps Dropout active, even when the model is in eval() mode
|
| 92 |
|
| 93 |
# Bayesian Post-processing
|
| 94 |
+
all_probs = torch.sigmoid(mc_logits) # (n_samples, B, 28)
|
| 95 |
|
| 96 |
+
mean_probs = all_probs.mean(dim=0) # Mean Predicted Probability
|
| 97 |
+
uncertainty = all_probs.std(dim=0) # Epistemic Uncertainty
|
| 98 |
|
| 99 |
|
| 100 |
# Formatted Output
|