Update README.md
README.md
CHANGED
@@ -1,57 +1,4 @@
 ---
 library_name: transformers
 pipeline_tag: text-generation
-inference: true
-widget:
-- text: Hello!
-  example_title: Hello world
-  group: Python
 ---
-
-This model is for debugging. It is randomly initialized using the config from [microsoft/Phi-3.5-MoE-instruct](https://huggingface.co/microsoft/Phi-3.5-MoE-instruct) but with a smaller size.
-
-Code:
-```python
-import torch
-from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
-                          GenerationConfig, pipeline, set_seed)
-
-model_id = "microsoft/Phi-3.5-MoE-instruct"
-repo_id = "yujiepan/phi-3.5-moe-tiny-random"
-save_path = f"/tmp/{repo_id}"
-
-# Shrink the architecture: 2 layers, 4 heads, 16-dim hidden states.
-config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
-config.hidden_size = 16
-config.intermediate_size = 32
-config.num_attention_heads = 4
-config.num_hidden_layers = 2
-config.num_key_value_heads = 4
-config.rope_scaling['long_factor'] = [1.0299, 1.0499]
-config.rope_scaling['short_factor'] = [1.05, 1.05]
-
-tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-tokenizer.save_pretrained(save_path)
-
-# Instantiate from the shrunken config: random weights, nothing downloaded.
-model = AutoModelForCausalLM.from_config(
-    config, torch_dtype=torch.bfloat16,
-    # attn_implementation="sdpa",
-    trust_remote_code=True,
-)
-model.generation_config = GenerationConfig.from_pretrained(
-    model_id, trust_remote_code=True
-)
-
-# Deterministically re-initialize every parameter.
-set_seed(42)
-with torch.no_grad():
-    for _, p in sorted(model.named_parameters()):
-        torch.nn.init.uniform_(p, -0.3, 0.3)
-
-model.save_pretrained(save_path)
-
-# Smoke test: one short generation.
-pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device="cuda",
-                trust_remote_code=True, max_new_tokens=20)
-print(pipe('Hello'))
-```
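One non-obvious detail in the removed snippet: after shrinking the config, it also overrides `rope_scaling['long_factor']` and `rope_scaling['short_factor']` with two-element lists. A plausible reading (my assumption based on the Phi-3 family's longrope validation, not something the card states) is that each factor list needs one entry per half of the rotary head dimension, which shrinks along with `hidden_size`:

```python
# Assumed rationale, not stated in the card: Phi-3-style "longrope" configs
# are validated so that len(factor_list) == head_dim // 2.
hidden_size = 16
num_attention_heads = 4
head_dim = hidden_size // num_attention_heads  # 4
print(head_dim // 2)  # 2 -> matches the two-element factor lists above
```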
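The snippet stops at `save_pretrained(save_path)`; the card never shows how the files reached the Hub. A minimal sketch of the likely follow-up with `huggingface_hub` (an assumption; the actual upload command is not in the card):

```python
from huggingface_hub import create_repo, upload_folder

# Hypothetical upload step; the removed card only saves to /tmp.
repo_id = "yujiepan/phi-3.5-moe-tiny-random"
save_path = f"/tmp/{repo_id}"

create_repo(repo_id, exist_ok=True)  # no-op if the repo already exists
upload_folder(repo_id=repo_id, folder_path=save_path,
              commit_message="upload tiny random PhiMoE")
```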
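Even after this commit trims the card down to bare metadata, the checkpoint itself is unchanged. A minimal sketch of loading it from the Hub, assuming the repo id from the removed card is live:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

repo_id = "yujiepan/phi-3.5-moe-tiny-random"  # repo id from the removed card

tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id, torch_dtype=torch.bfloat16, trust_remote_code=True
)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer,
                max_new_tokens=20)
print(pipe("Hello"))  # gibberish by design: the weights are random
```

With two layers and 16-dim hidden states, the model is only good for exercising the Phi-3.5-MoE code path (shapes, dtypes, pipeline wiring), not for generation quality.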