ariG23498 (HF Staff) committed
Commit c5bcc06 · verified · 1 Parent(s): 54522c5

Upload google_translategemma-12b-it_1.txt with huggingface_hub

Files changed (1)
  1. google_translategemma-12b-it_1.txt +7 -7
google_translategemma-12b-it_1.txt CHANGED
@@ -17,17 +17,17 @@ pipe(text=messages)
 
 ERROR:
 Traceback (most recent call last):
-  File "/tmp/google_translategemma-12b-it_1bGolEG.py", line 26, in <module>
+  File "/tmp/google_translategemma-12b-it_1n974lG.py", line 26, in <module>
     pipe = pipeline("image-text-to-text", model="google/translategemma-12b-it")
-  File "/tmp/.cache/uv/environments-v2/b56b4359def432d5/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1229, in pipeline
-    return pipeline_class(model=model, framework=framework, task=task, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/b56b4359def432d5/lib/python3.13/site-packages/transformers/pipelines/image_text_to_text.py", line 191, in __init__
+  File "/tmp/.cache/uv/environments-v2/b56b4359def432d5/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1028, in pipeline
+    return pipeline_class(model=model, task=task, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/b56b4359def432d5/lib/python3.13/site-packages/transformers/pipelines/image_text_to_text.py", line 127, in __init__
     super().__init__(*args, **kwargs)
     ~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/b56b4359def432d5/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1044, in __init__
+  File "/tmp/.cache/uv/environments-v2/b56b4359def432d5/lib/python3.13/site-packages/transformers/pipelines/base.py", line 861, in __init__
     self.model.to(self.device)
     ~~~~~~~~~~~~~^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/b56b4359def432d5/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4343, in to
+  File "/tmp/.cache/uv/environments-v2/b56b4359def432d5/lib/python3.13/site-packages/transformers/modeling_utils.py", line 3587, in to
     return super().to(*args, **kwargs)
     ~~~~~~~~~~^^^^^^^^^^^^^^^^^
   File "/tmp/.cache/uv/environments-v2/b56b4359def432d5/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1381, in to
@@ -56,4 +56,4 @@ Traceback (most recent call last):
     ^^^^^^^^^^^^^
     )
     ^
-torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 114.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 78.69 MiB is free. Process 70173 has 22.22 GiB memory in use. Of the allocated memory 21.75 GiB is allocated by PyTorch, and 233.66 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 114.00 MiB. GPU 0 has a total capacity of 22.30 GiB of which 78.69 MiB is free. Process 189073 has 22.22 GiB memory in use. Of the allocated memory 21.75 GiB is allocated by PyTorch, and 233.66 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
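
For reference, a minimal sketch of how the failing call might be retried under the mitigation the error message itself suggests (PYTORCH_ALLOC_CONF=expandable_segments:True) together with lower-memory load options, assuming a single ~22 GiB GPU with accelerate installed; the torch_dtype and device_map choices are illustrative assumptions, not settings taken from the uploaded script.

# Sketch only: the env var is the mitigation named in the traceback and must be
# set before CUDA is initialized; dtype/device_map are illustrative assumptions.
import os
os.environ["PYTORCH_ALLOC_CONF"] = "expandable_segments:True"

import torch
from transformers import pipeline

pipe = pipeline(
    "image-text-to-text",
    model="google/translategemma-12b-it",
    torch_dtype=torch.bfloat16,  # half-precision weights to shrink the GPU footprint
    device_map="auto",           # let accelerate place layers across devices instead of a single .to(device)
)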