Update with commit 45e14038f20d7f04574af4ce8356bab11e3d6741
See: https://github.com/huggingface/transformers/commit/45e14038f20d7f04574af4ce8356bab11e3d6741
- frameworks.json +1 -0
- pipeline_tags.json +2 -0
frameworks.json CHANGED
@@ -120,6 +120,7 @@
 {"model_type":"wav2vec2","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
 {"model_type":"wav2vec2-conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
 {"model_type":"wavlm","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
+{"model_type":"whisper","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
 {"model_type":"xclip","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
 {"model_type":"xglm","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
 {"model_type":"xlm","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
pipeline_tags.json CHANGED
@@ -750,6 +750,8 @@
 {"model_class":"WavLMForSequenceClassification","pipeline_tag":"audio-classification","auto_class":"AutoModelForAudioClassification"}
 {"model_class":"WavLMForXVector","pipeline_tag":"audio-xvector","auto_class":"AutoModelForAudioXVector"}
 {"model_class":"WavLMModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
+{"model_class":"WhisperForConditionalGeneration","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForSpeechSeq2Seq"}
+{"model_class":"WhisperModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"XCLIPModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"XGLMForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
 {"model_class":"XGLMModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
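The two added rows wire Whisper into the auto-class and pipeline machinery: WhisperForConditionalGeneration is served by AutoModelForSpeechSeq2Seq under the automatic-speech-recognition tag, while the bare WhisperModel falls back to AutoModel for feature extraction. A minimal sketch of both mappings, again assuming openai/whisper-tiny as an example checkpoint (the commit adds only the metadata rows, not this code):

from transformers import AutoModel, AutoModelForSpeechSeq2Seq, pipeline

# auto_class "AutoModelForSpeechSeq2Seq" -> WhisperForConditionalGeneration
model = AutoModelForSpeechSeq2Seq.from_pretrained("openai/whisper-tiny")
# pipeline_tag "automatic-speech-recognition" -> served by the ASR pipeline
asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
# text = asr("speech.wav")["text"]  # "speech.wav" is a hypothetical local audio file

# auto_class "AutoModel" + pipeline_tag "feature-extraction" -> WhisperModel
encoder_decoder = AutoModel.from_pretrained("openai/whisper-tiny")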