# MCOD
MCOD, which stands for "Model Configs on Drugs," is a 4.7M-parameter model trained on 7.1M tokens of Hugging Face model configs.
We are well aware that 7.1M tokens is below the Chinchilla-optimal target, but more tokens would not have meant more diversity: after cleaning the full 90M-token dataset, deduplication (across 13k+ documents) and filtering (by language and length) left only 7.1M tokens.
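The cleaning pipeline itself isn't published with the card; the sketch below shows one plausible shape for it, assuming exact-hash deduplication plus simple length and language heuristics (the thresholds and the ASCII check are illustrative stand-ins, not the actual pipeline).

```python
import hashlib

def clean(docs, min_len=64, max_len=8192):
    """Exact-dedup and filter raw config strings (illustrative only)."""
    seen, kept = set(), []
    for text in docs:
        h = hashlib.sha256(text.encode("utf-8")).hexdigest()
        if h in seen:              # deduplication: drop exact repeats
            continue
        seen.add(h)
        if not (min_len <= len(text) <= max_len):
            continue               # length filter
        if not text.isascii():     # crude stand-in for a language filter
            continue
        kept.append(text)
    return kept
```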
MCOD generates plausible-looking configs with the correct hyperparameters per model family.
## Architecture
| Parameter | Value |
|---|---|
| hidden_size | 256 |
| num_hidden_layers | 4 |
| num_attention_heads | 4 |
| num_key_value_heads | 4 |
| intermediate_size | 1024 |
| max_position_embeddings | 1024 |
| rope_theta | 100000.0 |
| tie_word_embeddings | true |
MCOD uses the Qwen3 architecture with the hyperparameters above.
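With those values, an equivalent model can be rebuilt in one call. A minimal sketch; `vocab_size` is not listed in the card's table, so 2048 below is a placeholder (the real value comes from the released tokenizer):

```python
from transformers import Qwen3Config, Qwen3ForCausalLM

config = Qwen3Config(
    vocab_size=2048,  # placeholder: not stated on the card
    hidden_size=256,
    num_hidden_layers=4,
    num_attention_heads=4,
    num_key_value_heads=4,
    intermediate_size=1024,
    max_position_embeddings=1024,
    rope_theta=100000.0,
    tie_word_embeddings=True,
)
model = Qwen3ForCausalLM(config)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```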
## Training
MCOD was trained on 18k entries, totaling 7.1M tokens (roughly 1M words).
### Hardware
MCOD was trained for 3 epochs with a batch size of 8 on a single NVIDIA RTX 2060 (6 GB).
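The training script itself isn't published. Here is a minimal sketch of the stated settings using `transformers.TrainingArguments`; the epoch count, batch size, and 200-step eval cadence come from the card, while the learning rate and fp16 flag are illustrative guesses:

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="mcod-4.7m",
    num_train_epochs=3,             # from the card
    per_device_train_batch_size=8,  # from the card
    eval_strategy="steps",
    eval_steps=200,                 # matches the results table below
    logging_steps=200,
    learning_rate=3e-4,             # assumption
    fp16=True,                      # assumption; the RTX 2060 supports fp16
)
```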
### Training Results
| Step | Epoch | Train Loss | Train PPL | Eval Loss | Eval PPL |
|---|---|---|---|---|---|
| 200 | 0.44 | 4.1022 | 60.53 | 1.8218 | 6.18 |
| 400 | 0.88 | 1.0227 | 2.78 | 0.5671 | 1.76 |
| 600 | 1.33 | 0.5434 | 1.72 | 0.3560 | 1.43 |
| 800 | 1.77 | 0.3978 | 1.49 | 0.2939 | 1.34 |
| 1000 | 2.21 | 0.3486 | 1.42 | 0.2514 | 1.29 |
| 1200 | 2.65 | 0.2944 | 1.34 | 0.2259 | 1.25 |
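The perplexity columns are just `exp(loss)`, which makes the table easy to sanity-check:

```python
import math

# Final logged step (1200): train loss 0.2944, eval loss 0.2259
print(round(math.exp(0.2944), 2))  # 1.34 -> Train PPL column
print(round(math.exp(0.2259), 2))  # 1.25 -> Eval PPL column
```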
## Generations
Prompt: {"model_type": "gpt2"
Output:
, "n_ctx": 1024, "n_embd": 768, "n_head": 12, "n_inner": null, "n_layer": 6, "n_positions": 1024, "reorder_and_upcast_attn": false, "resid_pdrop": 0.1, "scale_attn_by_inverse_layer_idx": false, "scale_attn_weights": true, "summary_activation": null, "summary_first_dropout": 0.1, "summary_proj_to_labels": true, "summary_type": "cls_index", "summary_use_proj": true, "task_specific_params": {"text-generation": {"do_sample": true, "max_length": 50}}, "torch_dtype": "float32", "transformers_version": "4.25.1", "use_cache": true, "vocab_size": 50257}
Prompt: `{`

Output:

```
"architectures": ["RobertaForSequenceClassification"], "attention_probs_dropout_prob": 0.1, "bos_token_id": 0, "classifier_dropout": null, "eos_token_id": 2, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 768, "id2label": {"0": "LABEL_0"}, "initializer_range": 0.02, "intermediate_size": 3072, "label2id": {"LABEL_0": 0}, "layer_norm_eps": 1e-05, "max_position_embeddings": 514, "model_type": "roberta", "num_attention_heads": 12, "num_hidden_layers": 6, "pad_token_id": 1, "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", "transformers_version": "4.11.3", "type_vocab_size": 1, "use_cache": true, "vocab_size": 50265}
```
Prompt: `The cat crossed`

Output:

```
, "architectures": ["BertForSequenceClassification"], "attention_probs_dropout_prob": 0.1, "classifier_dropout": null, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 768, "id2label": {"0": "anger", "1": "fear", "2": "surprise"}, "initializer_range": 0.02, "intermediate_size": 3072, "label2id": {"sadness": 0, "neutral": 1, "joy": 2}, "layer_norm_eps": 1e-12, "max_position_embeddings": 512, "model_type": "bert", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 0, "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", "transformers_version": "4.11.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 30522}
```
## Limitations
- Only generates model configs
- Cannot converse or reason
- Unconditional generations skew heavily toward BERT and BART configs
## Use Cases
- Educational research
- JSON modeling
- Generating synthetic configs for pretraining or fine-tuning datasets (be careful: the model hallucinates a lot; see the validation sketch below)
- Or, more simply, fun
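If you do use MCOD to synthesize training data, validate every sample before keeping it. A minimal check, assuming "valid JSON with a `model_type` key" is your acceptance bar (tighten as needed):

```python
import json

def valid_config(text: str) -> bool:
    """True if `text` parses as JSON and names a model_type."""
    try:
        cfg = json.loads(text)
    except json.JSONDecodeError:
        return False
    return isinstance(cfg, dict) and "model_type" in cfg

# MCOD completes a prompt, so re-attach the prompt before checking.
prompt = '{"model_type": "gpt2"'
completion = ', "n_layer": 6, "vocab_size": 50257}'
print(valid_config(prompt + completion))  # True
```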
## Inference
```python
# =============================================================================
# Inference
# =============================================================================
MODEL_DIR = "Harley-ml/MCOD-4.7M"  # local checkpoint dir or Hub repo id
TOKENIZER_PATH = MODEL_DIR

# --- Generation settings ---
PROMPT = "{"  # seed text; MCOD expects the start of a JSON config
MAX_NEW_TOKENS = 1024
TEMPERATURE = 0.7
TOP_P = 0.95
TOP_K = 50
REPETITION_PENALTY = 1.1
DO_SAMPLE = True
# =============================================================================

from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from transformers import (
    AddedToken,
    AutoModelForCausalLM,
    PreTrainedTokenizerFast,
)

# ---------------------------------------------------------------------------
# Device
# ---------------------------------------------------------------------------
device = (
    "cuda" if torch.cuda.is_available() else
    "mps" if torch.backends.mps.is_available() else
    "cpu"
)
print(f"Device : {device}")

# ---------------------------------------------------------------------------
# Tokenizer (mirrors training setup)
# ---------------------------------------------------------------------------
def load_tokenizer(path: str) -> PreTrainedTokenizerFast:
    p = Path(path)
    if p.is_dir():      # local checkpoint dir: tokenizer.json lives inside it
        p = p / "tokenizer.json"
    if not p.exists():  # not a local path: treat it as a Hub repo id
        p = Path(hf_hub_download(repo_id=path, filename="tokenizer.json"))
    tok = PreTrainedTokenizerFast(tokenizer_file=str(p))

    # Backfill any special tokens the raw tokenizer.json doesn't declare.
    specials = {}
    if tok.bos_token is None:
        specials["bos_token"] = AddedToken("<|bos|>", special=True)
    if tok.eos_token is None:
        specials["eos_token"] = AddedToken("<|eos|>", special=True)
    if tok.unk_token is None:
        specials["unk_token"] = AddedToken("<|unk|>", special=True)
    if tok.pad_token is None:
        if tok.eos_token is not None:
            tok.pad_token = tok.eos_token
        else:
            specials["pad_token"] = AddedToken("<|pad|>", special=True)
    if specials:
        tok.add_special_tokens(specials)
    tok.padding_side = "left"  # left-pad for batched generation
    return tok

print("Loading tokenizer...")
tokenizer = load_tokenizer(TOKENIZER_PATH)
print(f"  Vocab size : {tokenizer.vocab_size}")
print(f"  BOS        : {tokenizer.bos_token!r}")
print(f"  EOS        : {tokenizer.eos_token!r}")
print(f"  PAD        : {tokenizer.pad_token!r} (id={tokenizer.pad_token_id})")

# ---------------------------------------------------------------------------
# Model
# ---------------------------------------------------------------------------
print(f"\nLoading model from {MODEL_DIR} ...")
model = AutoModelForCausalLM.from_pretrained(
    MODEL_DIR,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    low_cpu_mem_usage=True,
)
model.eval()
model.to(device)

total_params = sum(p.numel() for p in model.parameters())
print(f"  Parameters : {total_params:,}")

# ---------------------------------------------------------------------------
# Generation helper
# ---------------------------------------------------------------------------
def generate(
    prompt: str = PROMPT,
    max_new_tokens: int = MAX_NEW_TOKENS,
    temperature: float = TEMPERATURE,
    top_p: float = TOP_P,
    top_k: int = TOP_K,
    repetition_penalty: float = REPETITION_PENALTY,
    do_sample: bool = DO_SAMPLE,
) -> str:
    # Prepend BOS manually so tokenization matches the training setup.
    bos = tokenizer.bos_token or ""
    full_prompt = bos + prompt
    inputs = tokenizer(
        full_prompt,
        return_tensors="pt",
        add_special_tokens=False,
    ).to(device)
    inputs.pop("token_type_ids", None)  # Qwen3 doesn't use this

    gen_kwargs = dict(
        max_new_tokens=max_new_tokens,
        do_sample=do_sample,
        repetition_penalty=repetition_penalty,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
    )
    if do_sample:
        gen_kwargs["temperature"] = temperature
        gen_kwargs["top_p"] = top_p
        gen_kwargs["top_k"] = top_k

    with torch.inference_mode():
        output_ids = model.generate(**inputs, **gen_kwargs)

    # Strip the prompt tokens so we only return what was generated.
    prompt_len = inputs["input_ids"].shape[-1]
    new_ids = output_ids[0][prompt_len:]
    return tokenizer.decode(new_ids, skip_special_tokens=True)

# ---------------------------------------------------------------------------
# Run
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    print(f"\nPrompt : {PROMPT!r}")
    print("-" * 60)
    output = generate(PROMPT)
    print("Generated:")
    print(output)
```
## Citation
```bibtex
@misc{mcod-4.7m,
  title  = {MCOD-4.7M: Low Entropy Training; Hugging Face Model Configs},
  author = {Harley-ml},
  year   = {2026},
  url    = {https://huggingface.co/Harley-ml/MCOD-4.7M}
}
```