{
  "model_type": "gpt2",
  "architectures": [
    "GPT2LMHeadModel"
  ],
  "vocab_size": 16000,
  "d_model": 576,
  "num_layers": 13,
  "num_heads": 16,
  "d_ff": 1280,
  "max_seq_len": 1024,
  "dropout": 0.1,
  "pad_token_id": 0,
  "eos_token_id": 1,
  "unk_token_id": 2,
  "torch_dtype": "float32",
  "transformers_version": "4.36.0",
  "task": "text-generation",
  "pipeline_tag": "text-generation",
  "library_name": "transformers",
  "license": "apache-2.0",
  "language": ["en", "code"],
  "tags": [
    "transformers",
    "pytorch",
    "safetensors",
    "text-generation",
    "code-generation",
    "python",
    "javascript",
    "coding",
    "programming",
    "sagemaker",
    "amazon-sagemaker",
    "cpu",
    "compact",
    "efficient",
    "nvdya-kit",
    "death-legion",
    "vllm",
    "sglang",
    "llama-cpp",
    "ollama",
    "lm-studio",
    "year-2026",
    "next-gen"
  ],
  "datasets": ["the-stack-v2"],
  "metrics": ["perplexity", "accuracy"],
  "inference": {
    "parameters": {
      "temperature": 0.8,
      "top_p": 0.95,
      "top_k": 50,
      "max_new_tokens": 200
    }
  },
  "sagemaker": {
    "sdk_version": "2.200.0",
    "instance_type": "ml.m5.large",
    "instance_count": 1,
    "container_image": "huggingface-pytorch-inference:2.0.0-transformers4.28.1-cpu-py310-ubuntu20.04-v1.0"
  }
}