Update README.md
README.md
@@ -90,32 +90,4 @@ Nano 1.0 is part of the **Yongle Nano Series (1.x)**, your small‑model family

---

You can run Yongle Nano 1.0 locally with the Hugging Face `transformers` library:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "YongleAI/Yongle-Nano-1.0"

# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

# Prepare conversation
messages = [
    {"role": "system", "content": "You are Yongle Nano 1.0, a helpful offline assistant."},
    {"role": "user", "content": "Explain how offline AI works."}
]

# Generate response
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer([text], return_tensors="pt").to(model.device)

outputs = model.generate(**inputs, max_new_tokens=512)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
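Since the model is pitched as an offline assistant, you may also want to load it with no network access at all. The sketch below is illustrative rather than part of the official instructions: `HF_HUB_OFFLINE` and `local_files_only=True` are standard Hugging Face `transformers`/`huggingface_hub` options, and the sketch assumes the weights were downloaded once beforehand.

```python
import os

# Assumption: the model was fetched once while online, so the weights already
# sit in the local Hugging Face cache (~/.cache/huggingface by default).
os.environ["HF_HUB_OFFLINE"] = "1"  # tell the hub client never to touch the network

from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "YongleAI/Yongle-Nano-1.0"

# local_files_only=True loads from the local cache and raises an error
# instead of attempting a download if any file is missing.
tokenizer = AutoTokenizer.from_pretrained(model_name, local_files_only=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",
    local_files_only=True,
)
```

From here, generation works exactly as in the snippet above.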