language: en

license: mit

tags: classification

datasets: FaiyazAzam/hw1-image-ds-groot-224 (https://huggingface.co/datasets/FaiyazAzam/hw1-image-ds-groot-224)

metrics: accuracy, weighted F1

model-index:

name: mobilenetv3_small_100

task:

  - type: Neural Net for Image Classification
  - name: Groot Classification

dataset:

  - type: Image
  - name: hw1-image-ds-groot-224

results:

metrics:

- type: accuracy
  value: 0.950000

- type: weighted F1
  value: 0.949986

hyperparameters: - {'model': {'names': ['timm_image'], 'timm_image': {'checkpoint_name': 'mobilenetv3_small_100', 'mix_choice': 'all_logits', 'data_types': ['image'], 'train_transforms': ['resize_shorter_side', 'center_crop', 'trivial_augment'], 'val_transforms': ['resize_shorter_side', 'center_crop'], 'image_norm': 'imagenet', 'image_size': None, 'image_chan_num': 3, 'use_learnable_image': False, 'max_image_num_per_column': 1}}, 'data': {'image': {'missing_value_strategy': 'zero'}, 'text': {'normalize_text': False}, 'categorical': {'minimum_cat_count': 100, 'maximum_num_cat': 20, 'convert_to_text': False, 'convert_to_text_template': 'latex'}, 'numerical': {'convert_to_text': False, 'scaler_with_mean': True, 'scaler_with_std': True}, 'document': {'missing_value_strategy': 'zero'}, 'label': {'numerical_preprocessing': 'standardscaler'}, 'pos_label': None, 'column_features_pooling_mode': 'concat', 'mixup': {'turn_on': False, 'mixup_alpha': 0.8, 'cutmix_alpha': 1.0, 'cutmix_minmax': None, 'prob': 1.0, 'switch_prob': 0.5, 'mode': 'batch', 'turn_off_epoch': 5, 'label_smoothing': 0.1}, 'modality_dropout': 0, 'templates': {'turn_on': False, 'num_templates': 30, 'template_length': 2048, 'preset_templates': ['super_glue', 'rte'], 'custom_templates': None}}, 'optim': {'optim_type': 'adamw', 'lr': 0.0004, 'weight_decay': 0.001, 'lr_choice': 'layerwise_decay', 'lr_decay': 0.9, 'lr_schedule': 'cosine_decay', 'max_epochs': 20, 'max_steps': -1, 'warmup_steps': 0.1, 'end_lr': 0, 'lr_mult': 1, 'patience': 10, 'val_check_interval': 0.5, 'check_val_every_n_epoch': 1, 'skip_final_val': False, 'gradient_clip_val': 1, 'gradient_clip_algorithm': 'norm', 'track_grad_norm': -1, 'log_every_n_steps': 10, 'label_smoothing': 0, 'top_k': 3, 'top_k_average_method': 'greedy_soup', 'peft': None, 'lora': {'module_filter': None, 'filter': ['query', 'value', '^q$', '^v$', '^k$', '^o$'], 'r': 8, 'alpha': 8, 'conv_lora_expert_num': 8}, 'loss_func': 'auto', 'focal_loss': {'alpha': None, 'gamma': 2.0, 
'reduction': 'mean'}, 'mask2former_loss': {'loss_cross_entropy_weight': 10.0, 'loss_mask_weight': 5.0, 'loss_dice_weight': 5.0}, 'extra_trainable_params': [], 'cross_modal_align': None, 'cross_modal_align_weight': 0, 'automatic_optimization': True, 'lemda': {'turn_on': False, 'arch_type': 'mlp_vae', 'z_dim': 8, 'num_layers': 6, 'kld_weight': 0.1, 'mse_weight': 0.1, 'adv_weight': 0.0001, 'consist_weight': 0.01, 'consist_threshold': 0.5, 'lr': 0.0001, 'optim_type': 'adamw', 'weight_decay': 1e-05}}, 'env': {'num_gpus': 0, 'num_nodes': 1, 'batch_size': 128, 'per_gpu_batch_size': 8, 'inference_batch_size_ratio': 4, 'precision': 32, 'num_workers': 2, 'num_workers_inference': 2, 'accelerator': 'auto', 'fast_dev_run': False, 'deterministic': False, 'auto_select_gpus': True, 'strategy': 'auto', 'deepspeed_allgather_size': 1000000000.0, 'deepspeed_allreduce_size': 1000000000.0, 'compile': {'turn_on': False, 'mode': 'default', 'dynamic': True, 'backend': 'inductor'}}}

Downloads last month

-

Downloads are not tracked for this model. How to track
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support