liamcripwell committed on
Commit
f04ba3a
·
verified ·
1 Parent(s): 08e3e7e

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +17 -15
README.md CHANGED
@@ -170,11 +170,10 @@ Pàgina 2 de 2
170
 
171
  ## vLLM:
172
  ```
173
- vllm serve numind/NuMarkdown-8B-reasoning --trust_remote_code --limit-mm-per-prompt image=1
174
  ```
175
 
176
  ```python
177
- import json
178
  from openai import OpenAI
179
  import base64
180
 
@@ -193,28 +192,31 @@ def encode_image(image_path):
193
  with open(image_path, "rb") as image_file:
194
  return base64.b64encode(image_file.read()).decode('utf-8')
195
 
196
- base64_image = encode_image("invoice.png")
 
197
 
198
  chat_response = client.chat.completions.create(
199
- model="numind/NuMarkdown-8B-reasoning",
200
  temperature=0.7,
201
  messages=[
 
 
 
202
  {
203
- "role": "user",
204
- "content": [
205
- {"type": "image_url",
206
- "image_url": {"url": data_url},
207
- "min_pixels": 100 * 28 * 28,
208
- "max_pixels": 5000 * 28 * 28,},
209
-
210
- ],
211
  },
212
  ],
213
-
 
214
  )
215
 
216
- reasoning = chat_response.choices[0].message.content.split("<thining>")[1].split("</thining>")[0]
217
- answer = chat_response.choices[0].message.content.split("<answer>")[1].split("</answer>")[0]
 
 
218
  ```
219
 
220
 
 
170
 
171
  ## vLLM:
172
  ```
173
+ vllm serve numind/NuMarkdown-8B-Thinking --trust_remote_code --limit-mm-per-prompt image=1
174
  ```
175
 
176
  ```python
 
177
  from openai import OpenAI
178
  import base64
179
 
 
192
  with open(image_path, "rb") as image_file:
193
  return base64.b64encode(image_file.read()).decode('utf-8')
194
 
195
+ base64_image = encode_image("image.png")
196
+ data_url = f"data:image/jpeg;base64,{base64_image}"
197
 
198
  chat_response = client.chat.completions.create(
199
+ model="numind/NuMarkdown-8B-Thinking",
200
  temperature=0.7,
201
  messages=[
202
+ {
203
+ "role": "user",
204
+ "content": [
205
  {
206
+ "type": "image_url",
207
+ "image_url": {"url": data_url},
208
+ "min_pixels": 100 * 28 * 28,
209
+ "max_pixels": 5000 * 28 * 28,
 
 
 
 
210
  },
211
  ],
212
+ },
213
+ ]
214
  )
215
 
216
+ result = chat_response.choices[0].message.content
217
+ reasoning = result.split("<think>")[1].split("</think>")[0]
218
+ answer = result.split("<answer>")[1].split("</answer>")[0]
219
+ print(answer)
220
  ```
221
 
222