ariG23498 HF Staff commited on
Commit
42cbcbb
·
verified ·
1 Parent(s): b121d9e

Upload facebook_sam3_2.txt with huggingface_hub

Browse files
Files changed (1) hide show
  1. facebook_sam3_2.txt +29 -7
facebook_sam3_2.txt CHANGED
@@ -1,17 +1,39 @@
1
  ```CODE:
2
  # Load model directly
3
- from transformers import AutoImageProcessor, AutoModel
4
 
5
- processor = AutoImageProcessor.from_pretrained("facebook/sam3")
6
  model = AutoModel.from_pretrained("facebook/sam3")
7
  ```
8
 
9
  ERROR:
10
  Traceback (most recent call last):
11
- File "/tmp/facebook_sam3_2XwbsCA.py", line 26, in <module>
12
- processor = AutoImageProcessor.from_pretrained("facebook/sam3")
13
- File "/tmp/.cache/uv/environments-v2/e0ad3a6bca3defbb/lib/python3.13/site-packages/transformers/models/auto/image_processing_auto.py", line 622, in from_pretrained
14
  raise ValueError(
15
- ...<3 lines>...
16
  )
17
- ValueError: Unrecognized image processor in facebook/sam3. Should have a `image_processor_type` key in its preprocessor_config.json of config.json, or one of the following `model_type` keys in its config.json: aimv2, aimv2_vision_model, align, aria, beit, bit, blip, blip-2, bridgetower, chameleon, chinese_clip, clip, clipseg, cohere2_vision, conditional_detr, convnext, convnextv2, cvt, data2vec-vision, deepseek_vl, deepseek_vl_hybrid, deformable_detr, deit, depth_anything, depth_pro, deta, detr, dinat, dinov2, dinov3_vit, donut-swin, dpt, edgetam, efficientformer, efficientloftr, efficientnet, eomt, flava, focalnet, fuyu, gemma3, gemma3n, git, glm4v, glpn, got_ocr2, grounding-dino, groupvit, hiera, idefics, idefics2, idefics3, ijepa, imagegpt, instructblip, instructblipvideo, janus, kosmos-2, kosmos-2.5, layoutlmv2, layoutlmv3, levit, lfm2_vl, lightglue, llama4, llava, llava_next, llava_next_video, llava_onevision, mask2former, maskformer, metaclip_2, mgp-str, mistral3, mlcd, mllama, mm-grounding-dino, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, nat, nougat, oneformer, ovis2, owlv2, owlvit, paligemma, perceiver, perception_lm, phi4_multimodal, pix2struct, pixtral, poolformer, prompt_depth_anything, pvt, pvt_v2, qwen2_5_vl, qwen2_vl, qwen3_vl, regnet, resnet, rt_detr, sam, sam2, sam_hq, segformer, seggpt, shieldgemma2, siglip, siglip2, smolvlm, superglue, superpoint, swiftformer, swin, swin2sr, swinv2, table-transformer, textnet, timesformer, timm_wrapper, tvlt, tvp, udop, upernet, van, videomae, vilt, vipllava, vit, vit_hybrid, vit_mae, vit_msn, vitmatte, xclip, yolos, zoedepth
 
 
 
1
  ```CODE:
2
  # Load model directly
3
+ from transformers import AutoTokenizer, AutoModel
4
 
5
+ tokenizer = AutoTokenizer.from_pretrained("facebook/sam3")
6
  model = AutoModel.from_pretrained("facebook/sam3")
7
  ```
8
 
9
  ERROR:
10
  Traceback (most recent call last):
11
+ File "/tmp/.cache/uv/environments-v2/e0ad3a6bca3defbb/lib/python3.13/site-packages/transformers/models/auto/configuration_auto.py", line 1360, in from_pretrained
12
+ config_class = CONFIG_MAPPING[config_dict["model_type"]]
13
+ ~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^
14
+ File "/tmp/.cache/uv/environments-v2/e0ad3a6bca3defbb/lib/python3.13/site-packages/transformers/models/auto/configuration_auto.py", line 1048, in __getitem__
15
+ raise KeyError(key)
16
+ KeyError: 'sam3_video'
17
+
18
+ During handling of the above exception, another exception occurred:
19
+
20
+ Traceback (most recent call last):
21
+ File "/tmp/facebook_sam3_2VcD99t.py", line 27, in <module>
22
+ model = AutoModel.from_pretrained("facebook/sam3")
23
+ File "/tmp/.cache/uv/environments-v2/e0ad3a6bca3defbb/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 549, in from_pretrained
24
+ config, kwargs = AutoConfig.from_pretrained(
25
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~^
26
+ pretrained_model_name_or_path,
27
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
28
+ ...<4 lines>...
29
+ **kwargs,
30
+ ^^^^^^^^^
31
+ )
32
+ ^
33
+ File "/tmp/.cache/uv/environments-v2/e0ad3a6bca3defbb/lib/python3.13/site-packages/transformers/models/auto/configuration_auto.py", line 1362, in from_pretrained
34
  raise ValueError(
35
+ ...<8 lines>...
36
  )
37
+ ValueError: The checkpoint you are trying to load has model type `sam3_video` but Transformers does not recognize this architecture. This could be because of an issue with the checkpoint, or because your version of Transformers is out of date.
38
+
39
+ You can update Transformers with the command `pip install --upgrade transformers`. If this does not work, and the checkpoint is very new, then there may not be a release version that supports this model yet. In this case, you can get the most up-to-date code by installing Transformers from source with the command `pip install git+https://github.com/huggingface/transformers.git`