Update with commit 4b8c6d4cf8c779bf0895deb980669f5b2cb5d182
Browse files. See: https://github.com/huggingface/transformers/commit/4b8c6d4cf8c779bf0895deb980669f5b2cb5d182
- frameworks.json +1 -0
- pipeline_tags.json +1 -0
frameworks.json
CHANGED
|
@@ -215,6 +215,7 @@
|
|
| 215 |
{"model_type":"pvt_v2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoImageProcessor"}
|
| 216 |
{"model_type":"qdqbert","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
| 217 |
{"model_type":"qwen2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
|
|
|
| 218 |
{"model_type":"qwen2_5_vl","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
| 219 |
{"model_type":"qwen2_audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
| 220 |
{"model_type":"qwen2_moe","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
|
|
|
| 215 |
{"model_type":"pvt_v2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoImageProcessor"}
|
| 216 |
{"model_type":"qdqbert","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
| 217 |
{"model_type":"qwen2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
| 218 |
+
{"model_type":"qwen2_5_omni","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
| 219 |
{"model_type":"qwen2_5_vl","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
| 220 |
{"model_type":"qwen2_audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
| 221 |
{"model_type":"qwen2_moe","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
pipeline_tags.json
CHANGED
|
@@ -776,6 +776,7 @@
|
|
| 776 |
{"model_class":"Qwen2MoeModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 777 |
{"model_class":"Qwen2VLForConditionalGeneration","pipeline_tag":"image-text-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 778 |
{"model_class":"Qwen2VLModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
|
|
|
| 779 |
{"model_class":"Qwen2_5_VLForConditionalGeneration","pipeline_tag":"image-text-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 780 |
{"model_class":"Qwen2_5_VLModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 781 |
{"model_class":"Qwen3ForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|
|
|
|
| 776 |
{"model_class":"Qwen2MoeModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 777 |
{"model_class":"Qwen2VLForConditionalGeneration","pipeline_tag":"image-text-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 778 |
{"model_class":"Qwen2VLModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 779 |
+
{"model_class":"Qwen2_5OmniForConditionalGeneration","pipeline_tag":"text-to-audio","auto_class":"AutoModelForTextToWaveform"}
|
| 780 |
{"model_class":"Qwen2_5_VLForConditionalGeneration","pipeline_tag":"image-text-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 781 |
{"model_class":"Qwen2_5_VLModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 782 |
{"model_class":"Qwen3ForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
|