Update README.md
README.md CHANGED
@@ -1,3 +1,49 @@
---
license: cc-by-4.0
---

# Finetuned `Gemma-2-2B` for generating subspaces for `Gemma-2-2B-it` from natural language descriptions

In the AxBench paper, we finetuned a subspace generator: a hyper-network that, given a concept description in natural language, generates a corresponding subspace for you. **A high-quality subspace generator can bypass dictionary training altogether!**

## How to use the subspace generator?

```py
import torch
import torch.nn.functional as F
from transformers import AutoModelForCausalLM, AutoTokenizer

class RegressionWrapper(torch.nn.Module):
    def __init__(self, base_model, hidden_size, output_dim):
        super().__init__()
        self.base_model = base_model
        self.regression_head = torch.nn.Linear(hidden_size, output_dim)

    def forward(self, input_ids, attention_mask):
        outputs = self.base_model.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True,
            return_dict=True
        )
        # Use the final-layer hidden state of the last token as the
        # summary representation of the concept description.
        last_hiddens = outputs.hidden_states[-1]
        last_token_representations = last_hiddens[:, -1]
        preds = self.regression_head(last_token_representations)
        # Generated subspace directions are unit-normalized.
        preds = F.normalize(preds, p=2, dim=-1)
        return preds

base_model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-2b", torch_dtype=torch.bfloat16)
base_tokenizer = AutoTokenizer.from_pretrained(
    "google/gemma-2-2b", model_max_length=512)

# Both dimensions match the width of the Gemma-2-2B residual stream,
# since the generated direction lives in that space.
hidden_size = base_model.config.hidden_size
output_dim = base_model.config.hidden_size

subspace_gen = RegressionWrapper(
    base_model, hidden_size, output_dim).bfloat16().to("cuda")
subspace_gen.load_state_dict(torch.load('model.pth'))

your_new_concept = "terms related to Stanford University"

inputs = base_tokenizer(your_new_concept, return_tensors="pt").to("cuda")
input_ids, attention_mask = inputs["input_ids"], inputs["attention_mask"]
subspace = subspace_gen(input_ids, attention_mask)[0]
```
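
The returned `subspace` is a unit-norm direction with the model's hidden size. As a minimal, hypothetical sketch (not part of this repo), here is one way such a direction can be used to steer `Gemma-2-2B-it`, in the spirit of the steering evaluations in AxBench: add a scaled copy of the direction to the output of one decoder layer during generation. The layer index and steering factor below are illustrative assumptions to tune, not values shipped with this model.

```py
# Hypothetical steering sketch: LAYER and FACTOR are illustrative choices,
# not values from this repo.
it_model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-2b-it", torch_dtype=torch.bfloat16).to("cuda")
it_tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it")

LAYER, FACTOR = 20, 8.0  # assumption: which layer to steer at, and how hard

def steer(module, inputs, output):
    # Decoder layers return a tuple whose first element is the hidden state;
    # add the scaled concept direction at every token position.
    return (output[0] + FACTOR * subspace.to(output[0].dtype),) + output[1:]

handle = it_model.model.layers[LAYER].register_forward_hook(steer)
chat = [{"role": "user", "content": "Tell me about your weekend."}]
prompt_ids = it_tokenizer.apply_chat_template(
    chat, add_generation_prompt=True, return_tensors="pt").to("cuda")
with torch.no_grad():
    generations = it_model.generate(prompt_ids, max_new_tokens=64)
handle.remove()  # detach the hook when done
print(it_tokenizer.decode(generations[0], skip_special_tokens=True))
```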