Lamapi committed on
Commit
060aaf5
·
verified ·
1 Parent(s): b0652d1

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +30 -49
README.md CHANGED
@@ -182,59 +182,40 @@ Next2-Air features a highly capable visual encoder, allowing it to process spati
182
  Make sure you have `transformers`, `torch`, `torchvision`, and `pillow` installed.
183
 
184
  ```python
185
- from transformers import AutoProcessor, AutoModelForCausalLM
186
- import torch
187
  from PIL import Image
188
- import requests
189
-
190
- model_id = "Lamapi/next2-air"
191
-
192
- # Load Model & Processor
193
- processor = AutoProcessor.from_pretrained(model_id)
194
- model = AutoModelForCausalLM.from_pretrained(
195
- model_id,
196
- torch_dtype=torch.float16,
197
- device_map="auto" # Will easily load on almost any modern GPU
198
- )
199
-
200
- # Prepare Image
201
- url = "https://qianwen-res.oss-accelerate.aliyuncs.com/Qwen3.5/demo/RealWorld/RealWorld-04.png"
202
- image = Image.open(requests.get(url, stream=True).raw)
203
-
204
- # Chat Template
205
- messages =[
206
- {
207
- "role": "system",
208
- "content": "Sen Next2-Air'sin. Lamapi tarafından Türkiye'de geliştirilmiş, hızlı ve akıllı bir yapay zekasın. Yanıtlarını düşünerek ve mantıklı bir şekilde ver."
209
- },
210
- {
211
- "role": "user",
212
- "content":[
213
- {"type": "image", "image": image},
214
- {"type": "text", "text": "Bu resimdeki temel objeleri ve sahneyi analiz eder misin?"}
215
- ]
216
- }
217
- ]
218
 
219
- # Process Inputs
220
- text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
221
- inputs = processor(text=[text], images=[image], return_tensors="pt").to(model.device)
222
-
223
- # Generate Output
224
- generated_ids = model.generate(
225
- **inputs,
226
- max_new_tokens=1024,
227
- temperature=0.6,
228
- top_p=0.95
229
- )
230
-
231
- # Decode
232
- generated_ids_trimmed =[
233
- out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
234
  ]
235
- output_text = processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
236
 
237
- print(output_text)
 
 
 
 
 
 
 
 
 
 
 
238
  ```
239
 
240
  ---
 
182
  Make sure you have `transformers`, `torch`, `torchvision`, and `pillow` installed.
183
 
184
  ```python
185
+ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoProcessor
 
186
  from PIL import Image
187
+ import torch
188
+
189
+ model_id = "thelamapi/next2-air"
190
+
191
+ model = AutoModelForCausalLM.from_pretrained(model_id)
192
+ processor = AutoProcessor.from_pretrained(model_id) # For vision.
193
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
 
195
+
196
+ # Create a message in chat format
197
+ messages = [
198
+ {"role": "system","content": [{"type": "text", "text": "You are Next2 Air, a smart and concise AI assistant trained by Lamapi. Always respond in the user's language. Proudly made in Turkey."}]},
199
+
200
+ {
201
+ "role": "user","content": [
202
+ {"type": "text", "text": "Write a highly optimized Rust function to calculate the Fibonacci sequence using memoization"}
203
+ ]
204
+ }
 
 
 
 
 
205
  ]
 
206
 
207
+ # Prepare input with Tokenizer
208
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
209
+ inputs = processor(text=prompt, return_tensors="pt")
210
+
211
+ # Remove 'mm_token_type_ids' if it's not needed for text-only generation
212
+ if "mm_token_type_ids" in inputs:
213
+ del inputs["mm_token_type_ids"]
214
+
215
+
216
+ # Output from the model
217
+ output = model.generate(**inputs, do_sample=True, temperature=0.7, max_new_tokens=128)
218
+ print(tokenizer.decode(output[0], skip_special_tokens=True))
219
  ```
220
 
221
  ---