{
  "_name_or_path": "openai/whisper-tiny",
  "activation_dropout": 0.0,
  "activation_function": "gelu",
  "apply_spec_augment": false,
  "architectures": [
    "WhisperForConditionalGeneration"
  ],
  "attention_dropout": 0.0,
  "begin_suppress_tokens": null,
  "bos_token_id": 50257,
  "classifier_proj_size": 256,
  "d_model": 384,
  "decoder_attention_heads": 6,
  "decoder_ffn_dim": 1536,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 4,
  "decoder_start_token_id": 50258,
  "dropout": 0.0,
  "encoder_attention_heads": 6,
  "encoder_ffn_dim": 1536,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 4,
  "eos_token_id": 50257,
  "forced_decoder_ids": null,
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "mask_feature_length": 10,
  "mask_feature_min_masks": 0,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_masks": 2,
  "mask_time_prob": 0.05,
  "max_length": null,
  "max_source_positions": 1500,
  "max_target_positions": 448,
  "median_filter_width": 7,
  "model_type": "whisper",
  "num_hidden_layers": 4,
  "num_mel_bins": 80,
  "pad_token_id": 50257,
  "quantization_config": {
    "config_groups": {
      "group_0": {
        "input_activations": null,
        "output_activations": null,
        "targets": [
          "Linear"
        ],
        "weights": {
          "actorder": null,
          "block_structure": null,
          "dynamic": false,
          "group_size": 128,
          "num_bits": 4,
          "observer": "minmax",
          "observer_kwargs": {},
          "strategy": "group",
          "symmetric": true,
          "type": "int"
        }
      }
    },
    "format": "pack-quantized",
    "global_compression_ratio": 2.3172346830253385,
    "ignore": [],
    "kv_cache_scheme": null,
    "quant_method": "compressed-tensors",
    "quantization_status": "compressed",
    "sparsity_config": {
      "format": "dense",
      "global_sparsity": 0.21430952951684026,
      "ignore": [
        "model.encoder.layers.0.self_attn.v_proj",
        "model.encoder.layers.0.self_attn.q_proj",
        "model.encoder.layers.0.self_attn.out_proj",
        "model.encoder.layers.0.fc1",
        "model.encoder.layers.0.fc2",
        "model.encoder.layers.1.self_attn.k_proj",
        "model.encoder.layers.1.self_attn.v_proj",
        "model.encoder.layers.1.self_attn.q_proj",
        "model.encoder.layers.1.self_attn.out_proj",
        "model.encoder.layers.1.fc1",
        "model.encoder.layers.1.fc2",
        "model.encoder.layers.2.self_attn.k_proj",
        "model.encoder.layers.2.self_attn.v_proj",
        "model.encoder.layers.2.self_attn.q_proj",
        "model.encoder.layers.2.self_attn.out_proj",
        "model.encoder.layers.2.fc1",
        "model.encoder.layers.2.fc2",
        "model.encoder.layers.3.self_attn.k_proj",
        "model.encoder.layers.3.self_attn.v_proj",
        "model.encoder.layers.3.self_attn.q_proj",
        "model.encoder.layers.3.self_attn.out_proj",
        "model.encoder.layers.3.fc1",
        "model.encoder.layers.3.fc2",
        "model.decoder.layers.0.self_attn.k_proj",
        "model.decoder.layers.0.self_attn.v_proj",
        "model.decoder.layers.0.self_attn.q_proj",
        "model.decoder.layers.0.self_attn.out_proj",
        "model.decoder.layers.0.encoder_attn.k_proj",
        "model.decoder.layers.0.encoder_attn.v_proj",
        "model.decoder.layers.0.encoder_attn.q_proj",
        "model.decoder.layers.0.encoder_attn.out_proj",
        "model.decoder.layers.0.fc1",
        "model.decoder.layers.0.fc2",
        "model.decoder.layers.1.self_attn.k_proj",
        "model.decoder.layers.1.self_attn.v_proj",
        "model.decoder.layers.1.self_attn.q_proj",
        "model.decoder.layers.1.self_attn.out_proj",
        "model.decoder.layers.1.encoder_attn.k_proj",
        "model.decoder.layers.1.encoder_attn.v_proj",
        "model.decoder.layers.1.encoder_attn.q_proj",
        "model.decoder.layers.1.encoder_attn.out_proj",
        "model.decoder.layers.1.fc1",
        "model.decoder.layers.1.fc2",
        "model.decoder.layers.2.self_attn.k_proj",
        "model.decoder.layers.2.self_attn.v_proj",
        "model.decoder.layers.2.self_attn.q_proj",
        "model.decoder.layers.2.self_attn.out_proj",
        "model.decoder.layers.2.encoder_attn.k_proj",
        "model.decoder.layers.2.encoder_attn.v_proj",
        "model.decoder.layers.2.encoder_attn.q_proj",
        "model.decoder.layers.2.encoder_attn.out_proj",
        "model.decoder.layers.2.fc1",
        "model.decoder.layers.2.fc2",
        "model.decoder.layers.3.self_attn.k_proj",
        "model.decoder.layers.3.self_attn.v_proj",
        "model.decoder.layers.3.self_attn.q_proj",
        "model.decoder.layers.3.self_attn.out_proj",
        "model.decoder.layers.3.encoder_attn.k_proj",
        "model.decoder.layers.3.encoder_attn.v_proj",
        "model.decoder.layers.3.encoder_attn.q_proj",
        "model.decoder.layers.3.encoder_attn.out_proj",
        "model.decoder.layers.3.fc1",
        "model.decoder.layers.3.fc2",
        "proj_out"
      ],
      "registry_requires_subclass": false,
      "sparsity_structure": "unstructured",
      "targets": [
        "model.encoder.layers.0.self_attn.k_proj"
      ]
    }
  },
  "scale_embedding": false,
  "torch_dtype": "float32",
  "transformers_version": "4.47.1",
  "use_cache": true,
  "use_weighted_layer_sum": false,
  "vocab_size": 51865
}