File size: 2,709 Bytes
d9ab257
 
 
 
 
 
 
 
b106aa6
 
d9ab257
b106aa6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d9ab257
 
b106aa6
d9ab257
 
b106aa6
 
d9ab257
 
b106aa6
 
d9ab257
 
b106aa6
 
 
 
 
d9ab257
 
b106aa6
 
 
 
d9ab257
 
 
b106aa6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d9ab257
 
 
b106aa6
 
 
 
 
 
 
d9ab257
 
 
b106aa6
 
 
 
 
 
 
 
 
 
 
 
 
d9ab257
 
 
b106aa6
 
 
 
d9ab257
 
 
b106aa6
 
 
 
d9ab257
 
 
b106aa6
 
 
 
 
 
d9ab257
 
 
b106aa6
 
 
d9ab257
 
 
b106aa6
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
# Model Card for Legion Coder 8M
# YAML front matter for the Hugging Face Hub.
---
base_model: dineth554/legion-coder-8m
library_name: transformers
license: mit
pipeline_tag: text-generation
language:
  - en
  - code

tags:
  - transformers
  - pytorch
  - safetensors
  - text-generation
  - code-generation
  - python
  - javascript
  - coding
  - programming
  - sagemaker
  - amazon-sagemaker
  - cpu
  - compact
  - efficient
  - nvdya-kit
  - death-legion

datasets:
  - the-stack-v2

metrics:
  - perplexity
  - accuracy

# Hub evaluation results; empty until benchmark numbers are published.
model-index:
  - name: Legion Coder 8M
    results: []

# Default generation parameters for the hosted inference widget.
inference:
  parameters:
    temperature: 0.8
    top_p: 0.95
    top_k: 50
    max_new_tokens: 200

# Amazon SageMaker deployment configuration.
sagemaker:
  sdk_version: "2.200.0"
  instance_type: "ml.m5.large"
  instance_count: 1
  container_image: "huggingface-pytorch-inference:2.0.0-transformers4.28.1-cpu-py310-ubuntu20.04-v1.0"

# Model Details
model_details:
  name: Legion Coder 8M
  # Quoted: "1.0.0" must stay a string, never be re-typed by tooling.
  version: "1.0.0"
  # NOTE(review): name says "8M" but the stated parameter count is ~44M — confirm which is correct.
  description: A compact yet powerful 44M parameter transformer model optimized for coding tasks
  developer: DEATH LEGION
  powered_by: nvdya-kit
  # Was "44,341,632": the embedded commas made this parse as a string, not an integer.
  parameters: 44341632
  architecture: GPT-style Transformer
  model_size: 170MB
  hidden_size: 576
  num_layers: 13
  num_heads: 16
  context_length: 1024
  vocabulary_size: 16000
  format: Safetensors
  precision: float32

# Training Details
training_details:
  optimizer: AdamW
  # Was "5e-4": YAML 1.1 resolves exponent-only scalars as strings; the
  # canonical float form requires a mantissa dot.
  learning_rate: 5.0e-4
  lr_schedule: cosine_decay
  batch_size: 4
  gradient_accumulation: true
  training_steps: 10000
  precision: float32

# Intended Use
# Previously every child key/item sat at column 0, so "intended_use" parsed
# as null and the lists leaked to the top level; re-nested correctly.
intended_use:
  primary_use_cases:
    - Code completion and generation
    - Function generation from descriptions
    - Debugging assistance
    - Code explanation and documentation
    - Programming concept explanations
    - Code scaffolding and prototyping
  target_users:
    - Software developers
    - Students learning to code
    - Data scientists
    - DevOps engineers
    - Technical writers

# Limitations
# Known constraints users should weigh before adopting the model.
limitations:
  - Limited to 1,024 token context window
  - Trained primarily on Python code
  - May generate code that requires review before production use
  - Not suitable for non-coding tasks

# Ethical Considerations
# Responsible-use guidance for downstream consumers of generated code.
ethical_considerations:
  - Generated code should be reviewed before deployment
  - May reproduce patterns from training data
  - Not a replacement for human code review
  - Users are responsible for compliance with licenses of generated code

# Citation
# BibTeX entry. The block-scalar body was previously at column 0, which ends
# the scalar immediately and makes "@misc{..." a YAML parse error ("@" is a
# reserved indicator); content must be indented under "citation: |".
citation: |
  @misc{legioncoder2026,
    title={Legion Coder 8M: A Compact Transformer for Code Generation},
    author={DEATH LEGION},
    year={2026},
    howpublished={\url{https://huggingface.co/dineth554/legion-coder-8m}}
  }

# Contact
# Children re-nested: they previously sat at column 0, leaving "contact" null.
contact:
  developer: DEATH LEGION
  powered_by: nvdya-kit
  repository: https://huggingface.co/dineth554/legion-coder-8m

# Branding
# Children re-nested: they previously sat at column 0, leaving "branding" null.
branding:
  # NOTE(review): "MADE WITH BY" looks like a dropped character/emoji between
  # "WITH" and "BY" — confirm the intended tagline before publishing.
  tagline: MADE WITH BY DEATH LEGION
  powered_by: nvdya-kit
  copyright: "2026 DEATH LEGION. All rights reserved."