Create model_card.yaml
model_card.yaml (ADDED, +59 -0)

@@ -0,0 +1,59 @@
+model_details:
+  name: Kirim1/Bert
+  description: A multilingual instruction-following language model supporting 100+ languages with a primary focus on English
+  version: 1.0.0
+  author: Kirim1
+  license: apache-2.0
+  model_type: bert
+  base_model: BERT
+
+model_characteristics:
+  architecture: BertForCausalLM
+  parameters: ~110M
+  hidden_size: 768
+  num_layers: 12
+  num_attention_heads: 12
+  vocab_size: 119547
+  max_position_embeddings: 512
+  context_window: 512
+
+capabilities:
+  primary_language: English
+  supported_languages: 100+
+  instruction_following: true
+  text_generation: true
+  multilingual_understanding: true
+  cross_lingual_tasks: true
+
+training:
+  training_data: Multilingual corpus with English emphasis
+  instruction_tuning: true
+  optimization: Instruction-following and task completion
+
+use_cases:
+  - Natural language understanding
+  - Text generation
+  - Question answering
+  - Text classification
+  - Multilingual text processing
+  - Instruction-following tasks
+  - Cross-lingual information retrieval
+
+limitations:
+  - Performance varies across language families
+  - Best results with English inputs
+  - May require domain-specific fine-tuning
+  - Limited performance on low-resource languages
+
+ethical_considerations:
+  - May reflect biases in training data
+  - Requires careful deployment in production
+  - Not suitable for sensitive decision-making without oversight
+  - Regular monitoring recommended
+
+technical_specifications:
+  framework: PyTorch
+  library: transformers
+  minimum_transformers_version: 4.36.0
+  precision: float32
+  deployment: CPU and GPU compatible
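
For reference, the hyperparameters under model_characteristics map onto a transformers BertConfig. The sketch below is not the repository's actual config.json, only the card's fields expressed with the library's equivalent names; is_decoder=True is an assumption implied by the BertForCausalLM architecture, since BERT needs that flag to act as a decoder.

from transformers import BertConfig

# Sketch: the card's model_characteristics expressed as a BertConfig.
# Values come from the card; the config.json published on the Hub may differ.
config = BertConfig(
    vocab_size=119547,            # card: vocab_size
    hidden_size=768,              # card: hidden_size
    num_hidden_layers=12,         # card: num_layers
    num_attention_heads=12,       # card: num_attention_heads
    max_position_embeddings=512,  # card: max_position_embeddings / context_window
    is_decoder=True,              # assumption: required for BERT as a causal LM
)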
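
And a minimal loading sketch following the technical_specifications (PyTorch, transformers >= 4.36.0). It assumes the Kirim1/Bert repository is published on the Hugging Face Hub with a causal-LM head; the prompt is illustrative, as the card does not specify a prompt format.

from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "Kirim1/Bert"  # repo name taken from the card; assumed to resolve on the Hub

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)  # transformers >= 4.36.0 per the card

# Illustrative instruction-style prompt
inputs = tokenizer("Classify the sentiment: I loved this film.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

Since the card lists float32 precision and CPU/GPU-compatible deployment, no dtype or device arguments are strictly needed; model.to("cuda") moves the model to a GPU where one is available.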