ariG23498 (HF Staff) committed
Commit 8115fbc · verified · Parent(s): c3bd28c

Upload lightonai_LightOnOCR-2-1B_0.txt with huggingface_hub

Files changed (1):
  1. lightonai_LightOnOCR-2-1B_0.txt +97 -0
lightonai_LightOnOCR-2-1B_0.txt ADDED
```CODE:
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("image-text-to-text", model="lightonai/LightOnOCR-2-1B")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"}
        ]
    },
]
pipe(text=messages)
```

ERROR:
Traceback (most recent call last):
  File "/tmp/lightonai_LightOnOCR-2-1B_0vNGpic.py", line 36, in <module>
    pipe(text=messages)
    ~~~~^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/image_text_to_text.py", line 346, in __call__
    return super().__call__(Chat(text, images), **kwargs)
           ~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1467, in __call__
    return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1474, in run_single
    model_outputs = self.forward(model_inputs, **forward_params)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1374, in forward
    model_outputs = self._forward(model_inputs, **forward_params)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/image_text_to_text.py", line 437, in _forward
    generated_sequence = self.model.generate(**model_inputs, **generate_kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/utils/_contextlib.py", line 124, in decorate_context
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/generation/utils.py", line 2566, in generate
    result = decoding_method(
        self,
        ...<5 lines>...
        **model_kwargs,
    )
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/generation/utils.py", line 2786, in _sample
    outputs = self(**model_inputs, return_dict=True)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
    return forward_call(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line 918, in wrapper
    output = func(self, *args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/mistral3/modeling_mistral3.py", line 469, in forward
    outputs = self.model(
        input_ids=input_ids,
        ...<11 lines>...
        **kwargs,
    )
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
    return forward_call(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line 918, in wrapper
    output = func(self, *args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/mistral3/modeling_mistral3.py", line 321, in forward
    image_features = self.get_image_features(
        pixel_values=pixel_values,
        vision_feature_layer=vision_feature_layer,
        image_sizes=image_sizes,
    )
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/mistral3/modeling_mistral3.py", line 247, in get_image_features
    image_outputs = self.vision_tower(pixel_values, image_sizes=image_sizes, output_hidden_states=True, **kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
    return forward_call(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line 918, in wrapper
    output = func(self, *args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/pixtral/modeling_pixtral.py", line 479, in forward
    patch_embeds = self.patch_conv(pixel_values)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
    return forward_call(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/conv.py", line 553, in forward
    return self._conv_forward(input, self.weight, self.bias)
           ~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/conv.py", line 548, in _conv_forward
    return F.conv2d(
           ~~~~~~~~^
        input, weight, bias, self.stride, self.padding, self.dilation, self.groups
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (CUDABFloat16Type) should be the same
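
The failure is a dtype mismatch: the checkpoint's weights load in bfloat16 (CUDABFloat16Type) while the preprocessed pixel_values reach the Pixtral patch-embedding conv as float32 (torch.cuda.FloatTensor), and F.conv2d requires both to match. Below is a minimal workaround sketch using the standard torch_dtype argument of pipeline(); it is untested against this checkpoint and is a suggestion, not a confirmed fix.

```
# Workaround sketch (assumption, untested on this model): make the weights and
# the pixel tensors agree on a dtype. float32 is shown because weights in fp32
# are guaranteed to match the fp32 pixel_values; whether torch.bfloat16 also
# works depends on the pipeline casting its processed inputs.
import torch
from transformers import pipeline

pipe = pipeline(
    "image-text-to-text",
    model="lightonai/LightOnOCR-2-1B",
    torch_dtype=torch.float32,  # keep weights in fp32 to match fp32 pixel_values
)

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"}
        ]
    },
]
print(pipe(text=messages))
```

Loading in float32 trades GPU memory for a guaranteed dtype match; passing torch.bfloat16 instead would halve the footprint but relies on the pipeline casting the processed inputs to its torch_dtype.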