Upload lightonai_LightOnOCR-2-1B_0.txt with huggingface_hub
lightonai_LightOnOCR-2-1B_0.txt  CHANGED  +35 -23
@@ -17,39 +17,41 @@ pipe(text=messages)

ERROR:
Traceback (most recent call last):
-  File "/tmp/lightonai_LightOnOCR-2-
    pipe(text=messages)
    ~~~~^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/image_text_to_text.py", line
-    return super().__call__(Chat(text
-
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/base.py", line
    return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
    ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/base.py", line
    model_outputs = self.forward(model_inputs, **forward_params)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/base.py", line
    model_outputs = self._forward(model_inputs, **forward_params)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/image_text_to_text.py", line
    generated_sequence = self.model.generate(**model_inputs, **generate_kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/utils/_contextlib.py", line 124, in decorate_context
    return func(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/generation/utils.py", line
    result = decoding_method(
        self,
        ...<5 lines>...
        **model_kwargs,
    )
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/generation/utils.py", line
-    outputs = self(
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
    ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
    return forward_call(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line
-
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/mistral3/modeling_mistral3.py", line
    outputs = self.model(
        input_ids=input_ids,
        ...<11 lines>...
@@ -60,24 +62,34 @@ Traceback (most recent call last):
    ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
    return forward_call(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line
-
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/mistral3/modeling_mistral3.py", line
    image_features = self.get_image_features(
        pixel_values=pixel_values,
-
-
    )
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/mistral3/modeling_mistral3.py", line 247, in get_image_features
-    image_outputs = self.vision_tower(pixel_values, image_sizes=image_sizes, output_hidden_states=True, **kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
    ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
    return forward_call(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line
    output = func(self, *args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/pixtral/modeling_pixtral.py", line
    patch_embeds = self.patch_conv(pixel_values)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)

ERROR:
Traceback (most recent call last):
+  File "/tmp/lightonai_LightOnOCR-2-1B_0g1kI4X.py", line 36, in <module>
    pipe(text=messages)
    ~~~~^^^^^^^^^^^^^^^
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/image_text_to_text.py", line 283, in __call__
+    return super().__call__(Chat(text), **kwargs)
+    ~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1274, in __call__
    return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
    ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1281, in run_single
    model_outputs = self.forward(model_inputs, **forward_params)
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1173, in forward
    model_outputs = self._forward(model_inputs, **forward_params)
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/image_text_to_text.py", line 372, in _forward
    generated_sequence = self.model.generate(**model_inputs, **generate_kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/utils/_contextlib.py", line 124, in decorate_context
    return func(*args, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/generation/utils.py", line 2669, in generate
    result = decoding_method(
        self,
        ...<5 lines>...
        **model_kwargs,
    )
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/generation/utils.py", line 2864, in _sample
+    outputs = self._prefill(input_ids, generation_config, model_kwargs)
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/generation/utils.py", line 3853, in _prefill
+    return self(**model_inputs, return_dict=True)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
    ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
    return forward_call(*args, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line 1002, in wrapper
+    outputs = func(self, *args, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/mistral3/modeling_mistral3.py", line 446, in forward
    outputs = self.model(
        input_ids=input_ids,
        ...<11 lines>...
    ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
    return forward_call(*args, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line 1002, in wrapper
+    outputs = func(self, *args, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/mistral3/modeling_mistral3.py", line 313, in forward
    image_features = self.get_image_features(
+    ~~~~~~~~~~~~~~~~~~~~~~~^
        pixel_values=pixel_values,
+    ^^^^^^^^^^^^^^^^^^^^^^^^^^
+        ...<2 lines>...
+        return_dict=True,
+    ^^^^^^^^^^^^^^^^^
+    ).pooler_output
+    ^
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line 1002, in wrapper
+    outputs = func(self, *args, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/mistral3/modeling_mistral3.py", line 232, in get_image_features
+    image_outputs = self.vision_tower(
+        pixel_values,
+        ...<3 lines>...
+        **kwargs,
    )
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
    ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
    return forward_call(*args, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line 835, in wrapper
    output = func(self, *args, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/pixtral/modeling_pixtral.py", line 496, in forward
    patch_embeds = self.patch_conv(pixel_values)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
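
Note: the traceback above starts at pipe(text=messages) in the uploaded script, goes through the transformers image-text-to-text pipeline, and ends inside the Mistral3 get_image_features() path at the Pixtral vision tower's patch_conv; the exception message itself is cut off in this log. The following is a minimal sketch of the kind of script that would produce this call chain, not the exact uploaded script: the model id (inferred from the file name), the image URL, and the prompt are assumed placeholders.

# Minimal reproduction sketch (assumptions marked below).
from transformers import pipeline

pipe = pipeline(
    "image-text-to-text",               # pipeline seen in the traceback
    model="lightonai/LightOnOCR-2-1B",  # assumed repo id, inferred from the file name
)

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://example.com/page.png"},  # placeholder image
            {"type": "text", "text": "Transcribe this page."},         # placeholder prompt
        ],
    }
]

# The call shown in the traceback; generation then runs the Mistral3 forward pass,
# whose get_image_features() passes pixel_values to the Pixtral vision tower,
# where the (truncated) exception is raised in patch_conv.
pipe(text=messages)

Since the log is truncated before the exception message, this sketch is only meant to reproduce the call chain, not to assert the root cause of the failure.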