Update with commit f3f6c86582611976e72be054675e2bf0abb5f775
Browse files. See: https://github.com/huggingface/transformers/commit/f3f6c86582611976e72be054675e2bf0abb5f775
- frameworks.json +1 -0
- pipeline_tags.json +2 -0
frameworks.json
CHANGED
|
@@ -198,6 +198,7 @@
|
|
| 198 |
{"model_type":"pvt_v2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoImageProcessor"}
|
| 199 |
{"model_type":"qdqbert","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
| 200 |
{"model_type":"qwen2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
|
|
|
| 201 |
{"model_type":"qwen2_audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
| 202 |
{"model_type":"qwen2_moe","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
| 203 |
{"model_type":"qwen2_vl","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
|
|
|
| 198 |
{"model_type":"pvt_v2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoImageProcessor"}
|
| 199 |
{"model_type":"qdqbert","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
| 200 |
{"model_type":"qwen2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
| 201 |
+
{"model_type":"qwen2_5_vl","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
| 202 |
{"model_type":"qwen2_audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
| 203 |
{"model_type":"qwen2_moe","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
| 204 |
{"model_type":"qwen2_vl","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
pipeline_tags.json
CHANGED
|
@@ -748,6 +748,8 @@
|
|
| 748 |
{"model_class":"Qwen2MoeModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 749 |
{"model_class":"Qwen2VLForConditionalGeneration","pipeline_tag":"image-text-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 750 |
{"model_class":"Qwen2VLModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
|
|
|
|
|
|
| 751 |
{"model_class":"RTDetrForObjectDetection","pipeline_tag":"object-detection","auto_class":"AutoModelForObjectDetection"}
|
| 752 |
{"model_class":"RTDetrModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 753 |
{"model_class":"RecurrentGemmaForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
|
|
|
| 748 |
{"model_class":"Qwen2MoeModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 749 |
{"model_class":"Qwen2VLForConditionalGeneration","pipeline_tag":"image-text-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 750 |
{"model_class":"Qwen2VLModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 751 |
+
{"model_class":"Qwen2_5_VLForConditionalGeneration","pipeline_tag":"image-text-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 752 |
+
{"model_class":"Qwen2_5_VLModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 753 |
{"model_class":"RTDetrForObjectDetection","pipeline_tag":"object-detection","auto_class":"AutoModelForObjectDetection"}
|
| 754 |
{"model_class":"RTDetrModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 755 |
{"model_class":"RecurrentGemmaForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|