yezdata committed on
Commit
767e28e
·
verified ·
1 Parent(s): e817e4f

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -5
README.md CHANGED
@@ -77,6 +77,7 @@ tokenizer = AutoTokenizer.from_pretrained(repo_id)
77
  # Initialize with same config as training
78
  model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
79
  ```
 
80
  ### 2. Bayesian inference
81
  To obtain probabilistic outputs and uncertainty metrics, use the `mc_forward` method:
82
  ```python
@@ -86,14 +87,14 @@ N_SAMPLES = 50
86
  inputs = tokenizer("I am so happy you are here!", return_tensors="pt")
87
 
88
  model.eval()
89
- with torch.no_grad():
90
- logits_mc = model.mc_forward(inputs['input_ids'], inputs['attention_mask'], n_samples=N_SAMPLES) # Automatically keeps Dropout active, even when in model.eval
91
 
92
  # Bayesian Post-processing
93
- probs_all = torch.sigmoid(logits_mc) # (n_samples, B, 28)
94
 
95
- mean_probs = probs_all.mean(dim=0) # Mean Predicted Probability
96
- uncertainty = probs_all.std(dim=0) # Epistemic Uncertainty (Standard Deviation)
97
 
98
 
99
  # Formatted Output
 
77
  # Initialize with same config as training
78
  model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
79
  ```
80
+
81
  ### 2. Bayesian inference
82
  To obtain probabilistic outputs and uncertainty metrics, use the `mc_forward` method:
83
  ```python
 
87
  inputs = tokenizer("I am so happy you are here!", return_tensors="pt")
88
 
89
  model.eval()
90
+ with torch.inference_mode():
91
+ mc_logits = model.mc_forward(inputs['input_ids'], inputs['attention_mask'], n_samples=N_SAMPLES) # Automatically keeps Dropout active, even when the model is in eval() mode
92
 
93
  # Bayesian Post-processing
94
+ all_probs = torch.sigmoid(mc_logits) # (n_samples, B, 28)
95
 
96
+ mean_probs = all_probs.mean(dim=0) # Mean Predicted Probability
97
+ uncertainty = all_probs.std(dim=0) # Epistemic Uncertainty
98
 
99
 
100
  # Formatted Output