Update with commit 3d276453a2b7c74f3259b1c136db3dd79c51756b
Browse files. See: https://github.com/huggingface/transformers/commit/3d276453a2b7c74f3259b1c136db3dd79c51756b
- frameworks.json +1 -0
- pipeline_tags.json +2 -0
frameworks.json
CHANGED
|
@@ -192,6 +192,7 @@
|
|
| 192 |
{"model_type":"lfm2_moe","pytorch":true,"processor":"AutoTokenizer"}
|
| 193 |
{"model_type":"lfm2_vl","pytorch":true,"processor":"AutoProcessor"}
|
| 194 |
{"model_type":"lightglue","pytorch":true,"processor":"AutoImageProcessor"}
|
|
|
|
| 195 |
{"model_type":"lilt","pytorch":true,"processor":"AutoTokenizer"}
|
| 196 |
{"model_type":"llama","pytorch":true,"processor":"AutoTokenizer"}
|
| 197 |
{"model_type":"llama4","pytorch":true,"processor":"AutoProcessor"}
|
|
|
|
| 192 |
{"model_type":"lfm2_moe","pytorch":true,"processor":"AutoTokenizer"}
|
| 193 |
{"model_type":"lfm2_vl","pytorch":true,"processor":"AutoProcessor"}
|
| 194 |
{"model_type":"lightglue","pytorch":true,"processor":"AutoImageProcessor"}
|
| 195 |
+
{"model_type":"lighton_ocr","pytorch":true,"processor":"AutoProcessor"}
|
| 196 |
{"model_type":"lilt","pytorch":true,"processor":"AutoTokenizer"}
|
| 197 |
{"model_type":"llama","pytorch":true,"processor":"AutoTokenizer"}
|
| 198 |
{"model_type":"llama4","pytorch":true,"processor":"AutoProcessor"}
|
pipeline_tags.json
CHANGED
|
@@ -655,6 +655,8 @@
|
|
| 655 |
{"model_class":"Lfm2VlForConditionalGeneration","pipeline_tag":"image-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 656 |
{"model_class":"Lfm2VlModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 657 |
{"model_class":"LightGlueForKeypointMatching","pipeline_tag":"keypoint-matching","auto_class":"AutoModelForKeypointMatching"}
|
|
|
|
|
|
|
| 658 |
{"model_class":"LiltForQuestionAnswering","pipeline_tag":"question-answering","auto_class":"AutoModelForQuestionAnswering"}
|
| 659 |
{"model_class":"LiltForSequenceClassification","pipeline_tag":"text-classification","auto_class":"AutoModelForSequenceClassification"}
|
| 660 |
{"model_class":"LiltForTokenClassification","pipeline_tag":"token-classification","auto_class":"AutoModelForTokenClassification"}
|
|
|
|
| 655 |
{"model_class":"Lfm2VlForConditionalGeneration","pipeline_tag":"image-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 656 |
{"model_class":"Lfm2VlModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 657 |
{"model_class":"LightGlueForKeypointMatching","pipeline_tag":"keypoint-matching","auto_class":"AutoModelForKeypointMatching"}
|
| 658 |
+
{"model_class":"LightOnOcrForConditionalGeneration","pipeline_tag":"image-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 659 |
+
{"model_class":"LightOnOcrModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 660 |
{"model_class":"LiltForQuestionAnswering","pipeline_tag":"question-answering","auto_class":"AutoModelForQuestionAnswering"}
|
| 661 |
{"model_class":"LiltForSequenceClassification","pipeline_tag":"text-classification","auto_class":"AutoModelForSequenceClassification"}
|
| 662 |
{"model_class":"LiltForTokenClassification","pipeline_tag":"token-classification","auto_class":"AutoModelForTokenClassification"}
|