Update README.md
README.md CHANGED

@@ -224,59 +224,40 @@ Next 2.5's visual cortex allows it to rival or beat proprietary nano-models from
Make sure you have the latest `transformers`, `torch`, `torchvision`, and `pillow` installed.

```python
-from transformers import AutoProcessor, AutoModelForCausalLM
-import torch
from PIL import Image
-import requests
-
-model_id = "
-
-processor = AutoProcessor.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(
-    model_id,
-    torch_dtype=torch.float16,
-    device_map="auto"
-)
-
-# Prepare Image
-url = "https://qianwen-res.oss-accelerate.aliyuncs.com/Qwen3.5/demo/RealWorld/RealWorld-04.png"
-image = Image.open(requests.get(url, stream=True).raw)
-
-# Chat Template (With Thinking Mode enabled by default)
-messages = [
-    {
-        "role": "system",
-        # i.e. "You are Next 2.5. An advanced AI developed in Turkey by Lamapi. Answer by thinking step by step."
-        "content": "Sen Next 2.5'sin. Lamapi tarafından Türkiye'de geliştirilmiş, gelişmiş bir yapay zekasın. Yanıtlarını adım adım düşünerek ver."
-    },
-    {
-        "role": "user",
-        "content": [
-            {"type": "image", "image": image},
-            # i.e. "What exactly do you see in this image? Explain by making logical inferences."
-            {"type": "text", "text": "Bu resimde tam olarak ne görüyorsun? Mantıksal çıkarımlar yaparak açıkla."}
-        ]
-    }
-]

-# Generate
-text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-inputs = processor(text=[text], images=[image], return_tensors="pt").to(model.device)
-generated_ids = model.generate(
-    **inputs,
-    max_new_tokens=512
-)
-
-# Decode
-generated_ids_trimmed = [
-    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
-output_text = processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

```

---
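The trimming step in the decode section above works because, for decoder-only models, `generate` returns each prompt followed by the newly generated tokens, so slicing off the first `len(in_ids)` entries leaves only the new ones. A minimal standalone illustration of the idiom, with plain Python lists standing in for the real id tensors:

```python
# Toy stand-ins for the real tensors: each "generated" sequence begins with its prompt ids.
input_ids = [[101, 7592, 2088, 102]]                        # one prompt, 4 tokens
generated_ids = [[101, 7592, 2088, 102, 2023, 2003, 1012]]  # prompt + 3 newly generated tokens

# Same slicing idiom as above: drop the prompt portion of every sequence.
trimmed = [out_ids[len(in_ids):] for in_ids, out_ids in zip(input_ids, generated_ids)]
print(trimmed)  # [[2023, 2003, 1012]]
```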
Make sure you have the latest `transformers`, `torch`, `torchvision`, and `pillow` installed.

```python
+from transformers import AutoTokenizer, AutoModelForCausalLM, AutoProcessor
from PIL import Image
+import torch
+
+model_id = "thelamapi/next2.5"
+
+model = AutoModelForCausalLM.from_pretrained(model_id)
+processor = AutoProcessor.from_pretrained(model_id)  # For vision.
+tokenizer = AutoTokenizer.from_pretrained(model_id)

+
+# Create a message in chat format
+messages = [
+    {"role": "system", "content": [{"type": "text", "text": "You are Next2.5, a smart and concise AI assistant trained by Lamapi. Always respond in the user's language. Proudly made in Turkey."}]},
+
+    {
+        "role": "user", "content": [
+            {"type": "text", "text": "Write a highly optimized Rust function to calculate the Fibonacci sequence using memoization"}
+        ]
+    }
]

+# Prepare the input with the chat template and processor
+prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
+inputs = processor(text=prompt, return_tensors="pt")
+
+# Remove 'mm_token_type_ids' if it's not needed for text-only generation
+if "mm_token_type_ids" in inputs:
+    del inputs["mm_token_type_ids"]
+
+# Output from the model
+output = model.generate(**inputs, do_sample=True, temperature=0.7, max_new_tokens=128)
+print(tokenizer.decode(output[0], skip_special_tokens=True))
```

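As written, the snippet above loads the weights on the CPU in full precision. For GPU inference, the half-precision, multi-device settings that appear in the earlier example (`torch_dtype=torch.float16`, `device_map="auto"`) can be applied to the same loading call; the sketch below shows one way to do that and is illustrative rather than the only supported configuration:

```python
import torch
from transformers import AutoModelForCausalLM

model_id = "thelamapi/next2.5"

# Half-precision weights spread across the available devices
# (device_map="auto" requires the `accelerate` package to be installed).
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Before generating, move the processor's output onto the model's device, e.g.:
# inputs = inputs.to(model.device)
```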
---
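Next 2.5 also takes image input, which is what the `AutoProcessor` is loaded for and what the earlier example in this section demonstrates. The sketch below shows how an image could be paired with a question; the `{"type": "image"}` placeholder, the `images=` argument, and `processor.apply_chat_template` follow the common multimodal pattern in `transformers` and are assumptions about this checkpoint rather than confirmed API, so check the model card for the exact call:

```python
import requests
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

model_id = "thelamapi/next2.5"
model = AutoModelForCausalLM.from_pretrained(model_id)
processor = AutoProcessor.from_pretrained(model_id)

# Demo image reused from the earlier example in this README.
url = "https://qianwen-res.oss-accelerate.aliyuncs.com/Qwen3.5/demo/RealWorld/RealWorld-04.png"
image = Image.open(requests.get(url, stream=True).raw)

# One user turn pairing an image placeholder with a question about it.
messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "What exactly do you see in this image?"},
    ]}
]

# Assumption: the processor exposes `apply_chat_template` and accepts `images=`
# alongside `text=`, as recent multimodal processors in transformers do.
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(text=prompt, images=[image], return_tensors="pt")

output = model.generate(**inputs, max_new_tokens=128)
print(processor.batch_decode(output, skip_special_tokens=True)[0])
```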