Instructions to use jeff-RQ/new-test-model with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use jeff-RQ/new-test-model with Transformers:
# Use a pipeline as a high-level helper # Warning: Pipeline type "image-to-text" is no longer supported in transformers v5. # You must load the model directly (see below) or downgrade to v4.x with: # 'pip install "transformers<5.0.0"' from transformers import pipeline pipe = pipeline("image-to-text", model="jeff-RQ/new-test-model") # Load model directly from transformers import AutoProcessor, AutoModelForVisualQuestionAnswering processor = AutoProcessor.from_pretrained("jeff-RQ/new-test-model") model = AutoModelForVisualQuestionAnswering.from_pretrained("jeff-RQ/new-test-model") - Notebooks
- Google Colab
- Kaggle
Update handler.py
Browse files — handler.py: +6 −3
handler.py
CHANGED
|
@@ -19,10 +19,13 @@ class EndpointHandler:
|
|
| 19 |
data = data.pop("inputs", data)
|
| 20 |
text = data.pop("text", data)
|
| 21 |
|
| 22 |
-
|
| 23 |
-
|
| 24 |
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
| 26 |
generated_ids = self.model.generate(**inputs)
|
| 27 |
generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
|
| 28 |
|
|
|
|
| 19 |
data = data.pop("inputs", data)
|
| 20 |
text = data.pop("text", data)
|
| 21 |
|
| 22 |
+
image_string_1 = base64.b64decode(data["image1"])
|
| 23 |
+
image_1 = Image.open(io.BytesIO(image_string_1))
|
| 24 |
|
| 25 |
+
image_string_2 = base64.b64decode(data["image2"])
|
| 26 |
+
image_2 = Image.open(io.BytesIO(image_string_2))
|
| 27 |
+
|
| 28 |
+
inputs = self.processor(images=[image_1, image_2], text=text, return_tensors="pt").to(self.device, torch.float16)
|
| 29 |
generated_ids = self.model.generate(**inputs)
|
| 30 |
generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
|
| 31 |
|