Update with commit 049e7917587aba45617cae0824d73f87009ce8e3
Browse files. See: https://github.com/huggingface/transformers/commit/049e7917587aba45617cae0824d73f87009ce8e3
- frameworks.json +1 -1
- pipeline_tags.json +2 -0
frameworks.json
CHANGED
|
@@ -15,7 +15,7 @@
|
|
| 15 |
{"model_type":"ctrl","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
| 16 |
{"model_type":"data2vec-audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
|
| 17 |
{"model_type":"data2vec-text","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
| 18 |
-
{"model_type":"data2vec-vision","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
|
| 19 |
{"model_type":"deberta","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
| 20 |
{"model_type":"deberta-v2","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
| 21 |
{"model_type":"decision_transformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
|
|
|
| 15 |
{"model_type":"ctrl","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
| 16 |
{"model_type":"data2vec-audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
|
| 17 |
{"model_type":"data2vec-text","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
| 18 |
+
{"model_type":"data2vec-vision","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
|
| 19 |
{"model_type":"deberta","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
| 20 |
{"model_type":"deberta-v2","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
| 21 |
{"model_type":"decision_transformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
pipeline_tags.json
CHANGED
|
@@ -449,6 +449,8 @@
|
|
| 449 |
{"model_class":"TFConvNextForImageClassification","pipeline_tag":"image-classification","auto_class":"TF_AutoModelForImageClassification"}
|
| 450 |
{"model_class":"TFConvNextModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
| 451 |
{"model_class":"TFDPRQuestionEncoder","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
|
|
|
|
|
|
| 452 |
{"model_class":"TFDebertaForMaskedLM","pipeline_tag":"fill-mask","auto_class":"TF_AutoModelForMaskedLM"}
|
| 453 |
{"model_class":"TFDebertaForQuestionAnswering","pipeline_tag":"question-answering","auto_class":"TF_AutoModelForQuestionAnswering"}
|
| 454 |
{"model_class":"TFDebertaForSequenceClassification","pipeline_tag":"text-classification","auto_class":"TF_AutoModelForSequenceClassification"}
|
|
|
|
| 449 |
{"model_class":"TFConvNextForImageClassification","pipeline_tag":"image-classification","auto_class":"TF_AutoModelForImageClassification"}
|
| 450 |
{"model_class":"TFConvNextModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
| 451 |
{"model_class":"TFDPRQuestionEncoder","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
| 452 |
+
{"model_class":"TFData2VecVisionForImageClassification","pipeline_tag":"image-classification","auto_class":"TF_AutoModelForImageClassification"}
|
| 453 |
+
{"model_class":"TFData2VecVisionModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
|
| 454 |
{"model_class":"TFDebertaForMaskedLM","pipeline_tag":"fill-mask","auto_class":"TF_AutoModelForMaskedLM"}
|
| 455 |
{"model_class":"TFDebertaForQuestionAnswering","pipeline_tag":"question-answering","auto_class":"TF_AutoModelForQuestionAnswering"}
|
| 456 |
{"model_class":"TFDebertaForSequenceClassification","pipeline_tag":"text-classification","auto_class":"TF_AutoModelForSequenceClassification"}
|