Instructions for using jeff-RQ/new-test-model with libraries, inference providers, notebooks, and local apps. Follow the links below to get started.
- Libraries
- Transformers
How to use jeff-RQ/new-test-model with Transformers:
```python
# Use a pipeline as a high-level helper.
# Warning: the "image-to-text" pipeline type is no longer supported in transformers v5.
# You must load the model directly (see below) or downgrade to v4.x with:
#   pip install "transformers<5.0.0"
from transformers import pipeline

pipe = pipeline("image-to-text", model="jeff-RQ/new-test-model")
```

```python
# Load the model directly.
from transformers import AutoProcessor, AutoModelForVisualQuestionAnswering

processor = AutoProcessor.from_pretrained("jeff-RQ/new-test-model")
model = AutoModelForVisualQuestionAnswering.from_pretrained("jeff-RQ/new-test-model")
```

A full inference sketch follows the notebook links below.

- Notebooks
- Google Colab
- Kaggle
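As referenced above, here is a minimal end-to-end inference sketch building on the direct-loading snippet. It is a hedged example, not official usage from the model card: it assumes the model takes an image plus a text prompt (matching the endpoint handler below), a local image file `sample.jpg`, and a CUDA device with float16 support. Adjust these to your setup.

```python
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForVisualQuestionAnswering

processor = AutoProcessor.from_pretrained("jeff-RQ/new-test-model")
model = AutoModelForVisualQuestionAnswering.from_pretrained(
    "jeff-RQ/new-test-model", torch_dtype=torch.float16
).to("cuda")

# "sample.jpg" and the prompt are placeholders; substitute your own inputs.
image = Image.open("sample.jpg")
inputs = processor(
    images=image, text="What is in this picture?", return_tensors="pt"
).to("cuda", torch.float16)

generated_ids = model.generate(**inputs)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())
```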
Update handler.py
handler.py (+3, -8):
```diff
@@ -19,15 +19,10 @@ class EndpointHandler:
         data = data.pop("inputs", data)
         text = data.pop("text", data)
 
-        image_string_1 = base64.b64decode(data["image_1"])
-        image_1 = Image.open(io.BytesIO(image_string_1))
-        print(image_1.size)
+        image_string = base64.b64decode(data["image"])
+        image = Image.open(io.BytesIO(image_string))
 
-        image_string_2 = base64.b64decode(data["image_2"])
-        image_2 = Image.open(io.BytesIO(image_string_2))
-        print(image_2.size)
-
-        inputs = self.processor(images=[image_1, image_2], text=text, return_tensors="pt").to(self.device, torch.float16)
+        inputs = self.processor(images=image, text=text, return_tensors="pt").to(self.device, torch.float16)
         generated_ids = self.model.generate(**inputs)
         generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
 
```
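After this change the handler decodes a single base64-encoded image from the request body instead of two. A hypothetical client call against a deployed Inference Endpoint might look like the sketch below; the endpoint URL, token, and file name are placeholders, and the payload shape simply mirrors what the updated handler pops (`inputs`, then `text`, then `image`).

```python
import base64

import requests  # assumed HTTP client; any would do

# Placeholders: replace with your endpoint URL, token, and image file.
ENDPOINT_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"
HF_TOKEN = "<HF_TOKEN>"

with open("sample.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

# The handler pops "inputs", then "text", then base64-decodes data["image"].
payload = {"inputs": {"image": image_b64, "text": "What is in this picture?"}}

response = requests.post(
    ENDPOINT_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
    json=payload,
)
print(response.json())
```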