Instructions for using Kibalama/Digit_classification_model with libraries, inference providers, notebooks, and local apps. Follow the links below to get started.
- Libraries
- Transformers
How to use Kibalama/Digit_classification_model with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("image-classification", model="Kibalama/Digit_classification_model")
pipe("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/parrots.png")
```

```python
# Load model directly
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("Kibalama/Digit_classification_model")
model = AutoModelForImageClassification.from_pretrained("Kibalama/Digit_classification_model")
```

An end-to-end inference sketch for the direct-load variant is shown after the notebook links below.
- Notebooks
- Google Colab
- Kaggle
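
The direct-load snippet above stops at constructing the processor and model. The sketch below continues from there and runs a single prediction; the calls (`AutoImageProcessor`, `AutoModelForImageClassification`, `model.config.id2label`) are standard Transformers APIs, while the image path `digit.png` is a placeholder for any digit image you have locally.

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("Kibalama/Digit_classification_model")
model = AutoModelForImageClassification.from_pretrained("Kibalama/Digit_classification_model")
model.eval()

# "digit.png" is a placeholder path; use any image of a handwritten digit.
image = Image.open("digit.png").convert("RGB")

# The processor resizes and normalizes the image to the 224x224 RGB input the ViT expects.
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Pick the highest-scoring class and map it back to its digit label via the config.
predicted_id = logits.argmax(-1).item()
print(model.config.id2label[predicted_id])
```

Since the config declares `problem_type` as `single_label_classification`, an argmax over the logits is enough to get the predicted digit; apply a softmax only if you want probability-like scores.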
The model's `config.json`:

```json
{
  "architectures": [
    "ViTForImageClassification"
  ],
  "attention_probs_dropout_prob": 0.0,
  "encoder_stride": 16,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_size": 768,
  "id2label": {
    "0": "0",
    "1": "1",
    "2": "2",
    "3": "3",
    "4": "4",
    "5": "5",
    "6": "6",
    "7": "7",
    "8": "8",
    "9": "9"
  },
  "image_size": 224,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "0": "0",
    "1": "1",
    "2": "2",
    "3": "3",
    "4": "4",
    "5": "5",
    "6": "6",
    "7": "7",
    "8": "8",
    "9": "9"
  },
  "layer_norm_eps": 1e-12,
  "model_type": "vit",
  "num_attention_heads": 12,
  "num_channels": 3,
  "num_hidden_layers": 12,
  "patch_size": 16,
  "pooler_act": "tanh",
  "pooler_output_size": 768,
  "problem_type": "single_label_classification",
  "qkv_bias": true,
  "torch_dtype": "float32",
  "transformers_version": "4.52.4"
}
```
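
For reference, the same settings can be read back programmatically. The sketch below uses `AutoConfig` and simply restates the `image_size`, `patch_size`, `hidden_size`, and `id2label` fields shown above; nothing here is specific to this model beyond those values.

```python
from transformers import AutoConfig

# Pull the same config.json shown above from the Hub.
config = AutoConfig.from_pretrained("Kibalama/Digit_classification_model")

# A 224x224 input split into 16x16 patches yields (224 // 16) ** 2 = 196 patch tokens,
# each projected into a 768-dimensional embedding.
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches, config.hidden_size)  # 196 768

# The classification head maps the pooled representation to the ten digit labels.
print(config.id2label)
```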