Text Generation
Transformers
Safetensors
minimax_m2
neuralmagic
redhat
llmcompressor
quantized
INT4
conversational
custom_code
compressed-tensors
Instructions to use RedHatAI/MiniMax-M2.5-quantized.w4a16 with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use RedHatAI/MiniMax-M2.5-quantized.w4a16 with Transformers:
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="RedHatAI/MiniMax-M2.5-quantized.w4a16", trust_remote_code=True)
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)

# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("RedHatAI/MiniMax-M2.5-quantized.w4a16", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("RedHatAI/MiniMax-M2.5-quantized.w4a16", trust_remote_code=True)
messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))

- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use RedHatAI/MiniMax-M2.5-quantized.w4a16 with vLLM:
Install from pip and serve model
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "RedHatAI/MiniMax-M2.5-quantized.w4a16"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "RedHatAI/MiniMax-M2.5-quantized.w4a16",
    "messages": [
      {
        "role": "user",
        "content": "What is the capital of France?"
      }
    ]
  }'

Use Docker
docker model run hf.co/RedHatAI/MiniMax-M2.5-quantized.w4a16
- SGLang
How to use RedHatAI/MiniMax-M2.5-quantized.w4a16 with SGLang:
Install from pip and serve model
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "RedHatAI/MiniMax-M2.5-quantized.w4a16" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "RedHatAI/MiniMax-M2.5-quantized.w4a16",
    "messages": [
      {
        "role": "user",
        "content": "What is the capital of France?"
      }
    ]
  }'

Use Docker images
docker run --gpus all \
  --shm-size 32g \
  -p 30000:30000 \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --env "HF_TOKEN=<secret>" \
  --ipc=host \
  lmsysorg/sglang:latest \
  python3 -m sglang.launch_server \
    --model-path "RedHatAI/MiniMax-M2.5-quantized.w4a16" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "RedHatAI/MiniMax-M2.5-quantized.w4a16",
    "messages": [
      {
        "role": "user",
        "content": "What is the capital of France?"
      }
    ]
  }'

- Docker Model Runner
How to use RedHatAI/MiniMax-M2.5-quantized.w4a16 with Docker Model Runner:
docker model run hf.co/RedHatAI/MiniMax-M2.5-quantized.w4a16
| { | |
| "schema_version": "0.2.2", | |
| "evaluation_id": "mmlu_pro_chat/RedHatAI/MiniMax-M2.5-quantized.w4a16/1777302998.11639", | |
| "evaluation_timestamp": "1777037460", | |
| "retrieved_timestamp": "1777302998.11639", | |
| "source_metadata": { | |
| "source_name": "lm-evaluation-harness", | |
| "source_type": "evaluation_run", | |
| "source_organization_name": "RedHatAI", | |
| "evaluator_relationship": "third_party" | |
| }, | |
| "eval_library": { | |
| "name": "lm_eval", | |
| "version": "0.4.12.dev0" | |
| }, | |
| "model_info": { | |
| "name": "RedHatAI/MiniMax-M2.5-quantized.w4a16", | |
| "id": "RedHatAI/MiniMax-M2.5-quantized.w4a16", | |
| "developer": "RedHatAI", | |
| "additional_details": { | |
| "model_args": "{'model': 'RedHatAI/MiniMax-M2.5-quantized.w4a16', 'max_length': 196608, 'base_url': 'http://0.0.0.0:8000/v1/chat/completions', 'num_concurrent': 28, 'max_retries': 3, 'tokenized_requests': False, 'tokenizer_backend': None, 'timeout': 2400}", | |
| "seed": "1234", | |
| "num_seeds_merged": "3" | |
| } | |
| }, | |
| "evaluation_results": [ | |
| { | |
| "evaluation_name": "mmlu_pro_chat_biology/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.9079497907949791, | |
| "details": { | |
| "seed_scores": "[0.9093444909344491, 0.9065550906555091, 0.9079497907949791]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.0008052305009618233, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_business/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.8719898605830165, | |
| "details": { | |
| "seed_scores": "[0.8694550063371356, 0.8719898605830165, 0.8745247148288974]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.0014634987812158046, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_chemistry/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.8695524146054181, | |
| "details": { | |
| "seed_scores": "[0.8692579505300353, 0.8719081272084805, 0.8674911660777385]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.0012835391470967918, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_computer_science/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.8601626016260162, | |
| "details": { | |
| "seed_scores": "[0.8658536585365854, 0.8609756097560975, 0.8536585365853658]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.003543820279301364, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_economics/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.8499210110584517, | |
| "details": { | |
| "seed_scores": "[0.8542654028436019, 0.8483412322274881, 0.8471563981042654]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.0021989590690481933, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_engineering/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.7282421740626075, | |
| "details": { | |
| "seed_scores": "[0.7347781217750258, 0.7213622291021672, 0.7285861713106295]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.003876652105120269, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_health/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.7921760391198044, | |
| "details": { | |
| "seed_scores": "[0.7946210268948656, 0.7970660146699267, 0.784841075794621]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.0037347805174864275, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_history/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.6815398075240595, | |
| "details": { | |
| "seed_scores": "[0.6850393700787402, 0.6797900262467191, 0.6797900262467191]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.0017497812773403416, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_law/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.5812897366030881, | |
| "details": { | |
| "seed_scores": "[0.5876475930971844, 0.5767484105358764, 0.5794732061762035]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.0032747967987865603, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_math/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.9346163335800641, | |
| "details": { | |
| "seed_scores": "[0.9333826794966691, 0.9370836417468542, 0.9333826794966691]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.0012336540833950418, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_other/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.7853535353535354, | |
| "details": { | |
| "seed_scores": "[0.7954545454545454, 0.775974025974026, 0.7846320346320347]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.005635100776267413, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_philosophy/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.7247828991315964, | |
| "details": { | |
| "seed_scores": "[0.7234468937875751, 0.7274549098196392, 0.7234468937875751]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.0013360053440213775, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_physics/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.8734924300744162, | |
| "details": { | |
| "seed_scores": "[0.8775981524249422, 0.8729792147806005, 0.8698999230177059]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.002237053602022402, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat_psychology/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "hf_dataset", | |
| "hf_repo": "TIGER-Lab/MMLU-Pro", | |
| "hf_split": "test" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.8074352548036758, | |
| "details": { | |
| "seed_scores": "[0.7944862155388471, 0.8233082706766918, 0.8045112781954887]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.008447681042671974, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| }, | |
| "generation_config": { | |
| "generation_args": { | |
| "temperature": 1.0, | |
| "top_p": 0.95, | |
| "top_k": 40.0, | |
| "max_tokens": 64000, | |
| "max_attempts": 1 | |
| }, | |
| "additional_details": { | |
| "until": "[]", | |
| "do_sample": "true", | |
| "min_p": "0.0", | |
| "presence_penalty": "1.5", | |
| "repetition_penalty": "1.0", | |
| "seed": "1234", | |
| "num_fewshot": "0" | |
| } | |
| } | |
| }, | |
| { | |
| "evaluation_name": "mmlu_pro_chat/custom-extract", | |
| "source_data": { | |
| "dataset_name": "mmlu_pro_chat", | |
| "source_type": "other" | |
| }, | |
| "evaluation_timestamp": "1777088443", | |
| "metric_config": { | |
| "evaluation_description": "exact_match (filter: custom-extract)", | |
| "lower_is_better": false, | |
| "score_type": "continuous", | |
| "min_score": 0.0, | |
| "max_score": 1.0 | |
| }, | |
| "score_details": { | |
| "score": 0.8124722960992908, | |
| "details": { | |
| "seed_scores": "[0.8144115691489362, 0.8125, 0.8105053191489362]", | |
| "seed_values": "[1234, 4158, 42]" | |
| }, | |
| "uncertainty": { | |
| "standard_error": { | |
| "value": 0.0011277223203151384, | |
| "method": "across_seeds" | |
| }, | |
| "num_samples": 3 | |
| } | |
| } | |
| } | |
| ] | |
| } | |